pyproject.toml
[build-system]
requires = ["hatchling", "hatch-requirements-txt"]
build-backend = "hatchling.build"
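# hatchling is the build backend; the hatch-requirements-txt plugin lets the
# dependency list declared as dynamic below be read from requirements.txt.
# An editable install such as `pip install -e .` is one typical way to build
# from source (illustrative command, not part of this file).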

[project]
name = "llama-recipes"
version = "0.0.4.post1"
authors = [
  { name="Hamid Shojanazeri", email="[email protected]" },
  { name="Matthias Reso", email="[email protected]" },
  { name="Geeta Chauhan", email="[email protected]" },
]
description = "Llama-recipes is a companion project to the Llama models. Its goal is to provide examples for quickly getting started with fine-tuning for domain adaptation and for running inference with the fine-tuned models."
readme = "README.md"
requires-python = ">=3.8"
classifiers = [
  "Programming Language :: Python :: 3",
  "License :: Other/Proprietary License",
  "Operating System :: OS Independent",
]
dynamic = ["dependencies"]

[project.optional-dependencies]
vllm = ["vllm"]
tests = ["pytest-mock"]
auditnlg = ["auditnlg"]
langchain = ["langchain_openai", "langchain", "langchain_community"]
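# Illustrative usage: an extra is pulled in with e.g. `pip install llama-recipes[vllm]`,
# or several at once with `pip install "llama-recipes[tests,langchain]"`; the extra
# names must match the keys in this table.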

[project.urls]
"Homepage" = "https://github.com/facebookresearch/llama-recipes/"
"Bug Tracker" = "https://github.com/facebookresearch/llama-recipes/issues"

[tool.hatch.build]
exclude = [
  "dist/*",
]

[tool.hatch.build.targets.wheel]
packages = ["src/llama_recipes"]
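# src layout: the wheel ships the package under src/llama_recipes, so it is
# imported as `llama_recipes` after installation.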

[tool.hatch.metadata.hooks.requirements_txt]
files = ["requirements.txt"]
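# The runtime dependencies declared as dynamic in [project] are taken from
# requirements.txt at build time by the hatch-requirements-txt hook.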

[tool.pytest.ini_options]
markers = [
  "skip_missing_tokenizer: skip tests when we cannot access meta-llama/Llama-2-7b-hf on the Hugging Face Hub (log in with `huggingface-cli login` to unskip).",
]
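# Illustrative note: registering the marker here only declares it to pytest; tests
# opt in with `@pytest.mark.skip_missing_tokenizer`, and the actual skip decision is
# expected to be made in a conftest.py hook or fixture.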