From cc41269135b9da5c73606e3d18504a1873c8f8d4 Mon Sep 17 00:00:00 2001
From: Mark Susol
Date: Sun, 17 Mar 2024 21:49:09 -0600
Subject: [PATCH] fix collision with datasets import

Issue found while running the colab today.
---
 examples/token_classification-tf.ipynb | 20 ++++++++++----------
 examples/token_classification.ipynb    | 20 ++++++++++----------
 2 files changed, 20 insertions(+), 20 deletions(-)

diff --git a/examples/token_classification-tf.ipynb b/examples/token_classification-tf.ipynb
index ed8ede07..74a2cdfb 100644
--- a/examples/token_classification-tf.ipynb
+++ b/examples/token_classification-tf.ipynb
@@ -300,7 +300,7 @@
     }
    ],
    "source": [
-    "datasets = load_dataset(\"conll2003\")"
+    "conll = load_dataset(\"conll2003\")"
    ]
   },
   {
@@ -309,7 +309,7 @@
     "id": "RzfPtOMoIrIu"
    },
    "source": [
-    "The `datasets` object itself is [`DatasetDict`](https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasetdict), which contains one key for the training, validation and test set."
+    "The `conll` object itself is [`DatasetDict`](https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasetdict), which contains one key for the training, validation and test set."
    ]
   },
   {
@@ -345,7 +345,7 @@
     }
    ],
    "source": [
-    "datasets"
+    "conll"
    ]
   },
   {
@@ -396,7 +396,7 @@
     }
    ],
    "source": [
-    "datasets[\"train\"][0]"
+    "conll[\"train\"][0]"
    ]
   },
   {
@@ -423,7 +423,7 @@
     }
    ],
    "source": [
-    "datasets[\"train\"].features[f\"ner_tags\"]"
+    "conll[\"train\"].features[f\"ner_tags\"]"
    ]
   },
   {
@@ -461,7 +461,7 @@
     }
    ],
    "source": [
-    "label_list = datasets[\"train\"].features[f\"{task}_tags\"].feature.names\n",
+    "label_list = conll[\"train\"].features[f\"{task}_tags\"].feature.names\n",
     "label_list"
    ]
   },
@@ -626,7 +626,7 @@
     }
    ],
    "source": [
-    "show_random_elements(datasets[\"train\"])"
+    "show_random_elements(conll[\"train\"])"
    ]
   },
   {
@@ -779,7 +779,7 @@
     }
    ],
    "source": [
-    "example = datasets[\"train\"][4]\n",
+    "example = conll[\"train\"][4]\n",
     "print(example[\"tokens\"])"
    ]
   },
@@ -973,7 +973,7 @@
     }
    ],
    "source": [
-    "tokenize_and_align_labels(datasets[\"train\"][:5])"
+    "tokenize_and_align_labels(conll[\"train\"][:5])"
    ]
   },
   {
@@ -1004,7 +1004,7 @@
     }
    ],
    "source": [
-    "tokenized_datasets = datasets.map(tokenize_and_align_labels, batched=True)"
+    "tokenized_datasets = conll.map(tokenize_and_align_labels, batched=True)"
    ]
   },
   {
diff --git a/examples/token_classification.ipynb b/examples/token_classification.ipynb
index 3fb038c3..82ab1031 100644
--- a/examples/token_classification.ipynb
+++ b/examples/token_classification.ipynb
@@ -273,7 +273,7 @@
     }
    ],
    "source": [
-    "datasets = load_dataset(\"conll2003\")"
+    "conll = load_dataset(\"conll2003\")"
    ]
   },
   {
@@ -282,7 +282,7 @@
     "id": "RzfPtOMoIrIu"
    },
    "source": [
-    "The `datasets` object itself is [`DatasetDict`](https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasetdict), which contains one key for the training, validation and test set."
+    "The `conll` object itself is [`DatasetDict`](https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasetdict), which contains one key for the training, validation and test set."
    ]
   },
   {
@@ -318,7 +318,7 @@
     }
    ],
    "source": [
-    "datasets"
+    "conll"
    ]
   },
   {
@@ -369,7 +369,7 @@
     }
    ],
    "source": [
-    "datasets[\"train\"][0]"
+    "conll[\"train\"][0]"
    ]
   },
   {
@@ -396,7 +396,7 @@
     }
    ],
    "source": [
-    "datasets[\"train\"].features[f\"ner_tags\"]"
+    "conll[\"train\"].features[f\"ner_tags\"]"
    ]
   },
   {
@@ -434,7 +434,7 @@
     }
    ],
    "source": [
-    "label_list = datasets[\"train\"].features[f\"{task}_tags\"].feature.names\n",
+    "label_list = conll[\"train\"].features[f\"{task}_tags\"].feature.names\n",
     "label_list"
    ]
   },
@@ -594,7 +594,7 @@
     }
    ],
    "source": [
-    "show_random_elements(datasets[\"train\"])"
+    "show_random_elements(conll[\"train\"])"
    ]
   },
   {
@@ -743,7 +743,7 @@
     }
    ],
    "source": [
-    "example = datasets[\"train\"][4]\n",
+    "example = conll[\"train\"][4]\n",
     "print(example[\"tokens\"])"
    ]
   },
@@ -935,7 +935,7 @@
     }
    ],
    "source": [
-    "tokenize_and_align_labels(datasets['train'][:5])"
+    "tokenize_and_align_labels(conll['train'][:5])"
    ]
   },
   {
@@ -966,7 +966,7 @@
     }
    ],
    "source": [
-    "tokenized_datasets = datasets.map(tokenize_and_align_labels, batched=True)"
+    "tokenized_datasets = conll.map(tokenize_and_align_labels, batched=True)"
    ]
   },
   {
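Note (not part of the patch): the rename is needed because the notebooks import from the `datasets` library and then bind a variable with the same name, which shadows the module for any later cell that refers to it. The exact failure hit in the Colab is not recorded in the commit, so the snippet below is only a minimal sketch of the kind of collision the `conll` rename avoids; the variable names mirror the notebooks, the rest is illustrative.

    # Minimal sketch (assumption, not code from the notebooks) of the name collision.
    import datasets
    from datasets import load_dataset

    print(datasets.__version__)           # fine: `datasets` still names the module

    datasets = load_dataset("conll2003")  # rebinding shadows the module with a DatasetDict
    # print(datasets.__version__)         # would now raise AttributeError

    conll = load_dataset("conll2003")     # the patch's rename keeps the module name free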