Commit
Bump version to 0.6.1 (#326)
* Bump version to 0.6.1.

* Re-disable Llama 2 tests.
rmitsch authored Oct 13, 2023
1 parent 89b5b65 commit bfd862a
Showing 2 changed files with 4 additions and 4 deletions.
setup.cfg: 1 addition & 1 deletion
@@ -1,5 +1,5 @@
 [metadata]
-version = 0.6.0
+version = 0.6.1
 description = Integrating LLMs into structured NLP pipelines
 author = Explosion
 author_email = [email protected]
spacy_llm/tests/models/test_llama2.py: 3 additions & 3 deletions
@@ -37,7 +37,7 @@
 """


-# @pytest.mark.skip(reason="CI runner needs more GPU memory")
+@pytest.mark.skip(reason="CI runner needs more GPU memory")
 @pytest.mark.gpu
 @pytest.mark.skipif(not has_torch_cuda_gpu, reason="needs GPU & CUDA")
 def test_init():
@@ -52,7 +52,7 @@ def test_init():
     )


-# @pytest.mark.skip(reason="CI runner needs more GPU memory")
+@pytest.mark.skip(reason="CI runner needs more GPU memory")
 @pytest.mark.gpu
 @pytest.mark.skipif(not has_torch_cuda_gpu, reason="needs GPU & CUDA")
 def test_init_from_config():
@@ -62,7 +62,7 @@ def test_init_from_config():
     torch.cuda.empty_cache()


-# @pytest.mark.skip(reason="CI runner needs more GPU memory")
+@pytest.mark.skip(reason="CI runner needs more GPU memory")
 @pytest.mark.gpu
 @pytest.mark.skipif(not has_torch_cuda_gpu, reason="needs GPU & CUDA")
 def test_invalid_model():
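For context on the toggled decorators: an active @pytest.mark.skip skips a test unconditionally, while @pytest.mark.skipif skips only when its condition holds, so with the skip marker commented out (the previous state) these tests were gated solely on GPU availability. Below is a minimal sketch of the pattern, assuming pytest and torch are installed; has_torch_cuda_gpu mirrors the flag used in the real module, but its definition and the test body here are placeholders, not the actual Llama 2 test code.

    import pytest
    import torch

    # Mirrors the GPU-availability flag used in test_llama2.py (assumed definition).
    has_torch_cuda_gpu = torch.cuda.is_available()


    @pytest.mark.skip(reason="CI runner needs more GPU memory")  # unconditional skip while active
    @pytest.mark.gpu  # custom marker; selected or deselected with `pytest -m gpu`
    @pytest.mark.skipif(not has_torch_cuda_gpu, reason="needs GPU & CUDA")
    def test_init():
        # Placeholder body; the real test loads a Llama 2 model onto the GPU.
        assert has_torch_cuda_gpu

Restoring the skip marker, as this commit does, disables the tests again even on runners that do have a CUDA GPU.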
