From bfd862aa00893e14ccbb2d49cbc764820b842eb4 Mon Sep 17 00:00:00 2001
From: Raphael Mitsch
Date: Fri, 13 Oct 2023 19:36:17 +0200
Subject: [PATCH] Bump version to 0.6.1 (#326)

* Bump version to 0.6.1.

* Re-disable Llama 2 tests.
---
 setup.cfg                             | 2 +-
 spacy_llm/tests/models/test_llama2.py | 6 +++---
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/setup.cfg b/setup.cfg
index 2393aee9..24d57974 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,5 +1,5 @@
 [metadata]
-version = 0.6.0
+version = 0.6.1
 description = Integrating LLMs into structured NLP pipelines
 author = Explosion
 author_email = contact@explosion.ai
diff --git a/spacy_llm/tests/models/test_llama2.py b/spacy_llm/tests/models/test_llama2.py
index 9debc2d7..6896269b 100644
--- a/spacy_llm/tests/models/test_llama2.py
+++ b/spacy_llm/tests/models/test_llama2.py
@@ -37,7 +37,7 @@
 """


-# @pytest.mark.skip(reason="CI runner needs more GPU memory")
+@pytest.mark.skip(reason="CI runner needs more GPU memory")
 @pytest.mark.gpu
 @pytest.mark.skipif(not has_torch_cuda_gpu, reason="needs GPU & CUDA")
 def test_init():
@@ -52,7 +52,7 @@ def test_init():
     )


-# @pytest.mark.skip(reason="CI runner needs more GPU memory")
+@pytest.mark.skip(reason="CI runner needs more GPU memory")
 @pytest.mark.gpu
 @pytest.mark.skipif(not has_torch_cuda_gpu, reason="needs GPU & CUDA")
 def test_init_from_config():
@@ -62,7 +62,7 @@ def test_init_from_config():
     torch.cuda.empty_cache()


-# @pytest.mark.skip(reason="CI runner needs more GPU memory")
+@pytest.mark.skip(reason="CI runner needs more GPU memory")
 @pytest.mark.gpu
 @pytest.mark.skipif(not has_torch_cuda_gpu, reason="needs GPU & CUDA")
 def test_invalid_model():