diff --git a/google/colab/_dataframe_summarizer.py b/google/colab/_dataframe_summarizer.py
index 74ffd346..76e43a19 100644
--- a/google/colab/_dataframe_summarizer.py
+++ b/google/colab/_dataframe_summarizer.py
@@ -72,7 +72,7 @@ def _summarize_columns(df: pd.DataFrame, n_samples: int = 3):
         properties["dtype"] = "string"
       except TypeError:
         properties["dtype"] = str(dtype)
-    elif pd.api.types.is_categorical_dtype(column):
+    elif isinstance(column.dtype, pd.CategoricalDtype):
       properties["dtype"] = "category"
     elif pd.api.types.is_datetime64_any_dtype(column):
       properties["dtype"] = "date"
diff --git a/notebooks/Gemma_Distributed_Fine_tuning_on_TPU.ipynb b/notebooks/Gemma_Distributed_Fine_tuning_on_TPU.ipynb
index 2db9079a..892e62cb 100644
--- a/notebooks/Gemma_Distributed_Fine_tuning_on_TPU.ipynb
+++ b/notebooks/Gemma_Distributed_Fine_tuning_on_TPU.ipynb
@@ -48,7 +48,7 @@
       "source": [
         "## Overview\n",
         "\n",
-        "Gemma is a family of lightweight, state-of-the-art open models built from research and technology used to create Google Gemini models. Gemma can be further finetuned to suit specific needs. But Large Language Models, such as Gemma, can be very large in size and some of them may not fit on a sing accelerator for finetuning. In this case there are two general approaches for finetuning them:\n",
+        "Gemma is a family of lightweight, state-of-the-art open models built from research and technology used to create Google Gemini models. Gemma can be further finetuned to suit specific needs. But Large Language Models, such as Gemma, can be very large in size and some of them may not fit on a single accelerator for finetuning. In this case there are two general approaches for finetuning them:\n",
         "1. Parameter Efficient Fine-Tuning (PEFT), which seeks to shrink the effective model size by sacrificing some fidelity. LoRA falls in this category and the [Fine-tune Gemma models in Keras using LoRA](https://ai.google.dev/gemma/docs/lora_tuning) tutorial demonstrates how to finetune the Gemma 7B model `gemma_instruct_7b_en` with LoRA using KerasNLP on a single GPU.\n",
         "2. Full parameter finetuning with model parallelism. Model parallelism distributes a single model's weights across multiple devices and enables horizontal scaling. You can find out more about distributed training in this [Keras guide](https://keras.io/guides/distribution/).\n",
         "\n",
@@ -4232,4 +4232,4 @@
   },
   "nbformat": 4,
   "nbformat_minor": 0
-}
+}
\ No newline at end of file