diff --git a/setup.py b/setup.py
index 4f689ea..5f23f8c 100644
--- a/setup.py
+++ b/setup.py
@@ -5,7 +5,7 @@
 
 setuptools.setup(
     name="smile-datasets",
-    version="0.0.2",
+    version="0.0.3",
     description="La**S**t **mile** datasets: Use `tf.data` to solve the last mile data loading problem for tensorflow.",
     long_description=long_description,
     long_description_content_type="text/markdown",
diff --git a/smile_datasets/__init__.py b/smile_datasets/__init__.py
index 2fd64f2..a82eb3f 100644
--- a/smile_datasets/__init__.py
+++ b/smile_datasets/__init__.py
@@ -18,6 +18,6 @@
 from smile_datasets.token_classification.tokenizers import BertCharLevelTokenizer, LabelTokenizerForTokenClassification
 
 __name__ = "smile_datasets"
-__version__ = "0.0.2"
+__version__ = "0.0.3"
 
 logging.basicConfig(format="%(asctime)s %(levelname)7s %(filename)20s %(lineno)4d] %(message)s", level=logging.INFO)