Skip to content

Commit 3f36d81

Browse files
committed
infra: docs and normalize formatting
Signed-off-by: aarnphm-ec2-dev <[email protected]>
1 parent 8ca488d commit 3f36d81

File tree

4 files changed

+61
-67
lines changed

4 files changed

+61
-67
lines changed

pyproject.toml

+48-59
Original file line numberDiff line numberDiff line change
@@ -1,29 +1,8 @@
11
[build-system]
2-
requires = ["hatchling"]
32
build-backend = "hatchling.build"
3+
requires = ["hatchling"]
44

55
[project]
6-
name = "openllm"
7-
dynamic = ["version"]
8-
description = 'OpenLLM: REST/gRPC API server for running any open Large-Language Model - StableLM, Llama, Alpaca, Dolly, Flan-T5, Custom'
9-
readme = "README.md"
10-
requires-python = ">=3.8"
11-
license = "Apache-2.0"
12-
keywords = [
13-
"MLOps",
14-
"AI",
15-
"BentoML",
16-
"Model Serving",
17-
"Model Deployment",
18-
"LLMOps",
19-
"Large Language Model",
20-
"Generative AI",
21-
"Stable Diffusion",
22-
"StableLM",
23-
"Alpaca",
24-
"PyTorch",
25-
"Transformers",
26-
]
276
authors = [
287
{ name = "Aaron Pham", email = "[email protected]" },
298
{ name = "BentoML Team", email = "[email protected]" },
@@ -57,7 +36,7 @@ dependencies = [
5736
"grpcio-reflection",
5837
"httpx[http2]",
5938
# transformers[torch] includes torch and transformers
60-
"transformers[torch,accelerate,tokenizers,onnxruntime,onnx]>=4.29.0",
39+
"transformers[torch,accelerate,tokenizers,onnxruntime,onnx,optimum]>=4.29.0",
6140
# Super fast JSON serialization
6241
"orjson",
6342
"inflection",
@@ -66,18 +45,33 @@ dependencies = [
6645
# black for generating service file.
6746
"black[jupyter]==23.3.0",
6847
]
48+
description = 'OpenLLM: REST/gRPC API server for running any open Large-Language Model - StableLM, Llama, Alpaca, Dolly, Flan-T5, Custom'
49+
dynamic = ["version"]
50+
keywords = [
51+
"MLOps",
52+
"AI",
53+
"BentoML",
54+
"Model Serving",
55+
"Model Deployment",
56+
"LLMOps",
57+
"Large Language Model",
58+
"Generative AI",
59+
"Stable Diffusion",
60+
"StableLM",
61+
"Alpaca",
62+
"PyTorch",
63+
"Transformers",
64+
]
65+
license = "Apache-2.0"
66+
name = "openllm"
67+
readme = "README.md"
68+
requires-python = ">=3.8"
6969

7070
[project.optional-dependencies]
71-
all = [
72-
'openllm[fine-tune]',
73-
'openllm[chatglm]',
74-
'openllm[falcon]',
75-
'openllm[flan-t5]',
76-
'openllm[starcoder]',
77-
]
78-
fine-tune = ["peft", "bitsandbytes", "datasets"]
71+
all = ['openllm[fine-tune]', 'openllm[chatglm]', 'openllm[falcon]', 'openllm[flan-t5]', 'openllm[starcoder]']
7972
chatglm = ['cpm_kernels', 'sentencepiece']
8073
falcon = ['einops']
74+
fine-tune = ["peft", "bitsandbytes", "datasets"]
8175
flan-t5 = ['flax', 'jax', 'jaxlib', 'tensorflow']
8276
starcoder = ['bitsandbytes']
8377

@@ -108,38 +102,31 @@ dependencies = [
108102
"pre-commit",
109103
]
110104
[tool.hatch.envs.default.scripts]
105+
cov = ["test-cov", "cov-report"]
106+
cov-report = ["- coverage combine", "coverage report"]
111107
setup = "pre-commit install"
112108
test = "pytest {args:tests}"
113109
test-cov = "coverage run -m pytest {args:tests}"
114-
cov-report = ["- coverage combine", "coverage report"]
115-
cov = ["test-cov", "cov-report"]
116110

117111
[[tool.hatch.envs.all.matrix]]
118112
python = ["3.8", "3.9", "3.10", "3.11"]
119113

120114
[tool.hatch.envs.dev]
121-
detached = true
122115
dependencies = ["ruff>=0.0.243", "pyright", "hatch"]
116+
detached = true
123117

124118
[tool.hatch.envs.dev.scripts]
125-
typing = "pyright {args:src/openllm tests}"
126-
style = ["ruff {args:.}", "black --check --diff {args:.}"]
127-
fmt = [
128-
"black {args:.}",
129-
"black --pyi {args:typings/}",
130-
"ruff --fix {args:.}",
131-
"style",
132-
]
133119
all = ["fmt", "typing"]
120+
fmt = ["black {args:.}", "black --pyi {args:typings/}", "ruff --fix {args:.}", "style"]
121+
style = ["ruff {args:.}", "black --check --diff {args:.}"]
122+
typing = "pyright {args:src/openllm tests}"
134123

135124
[tool.pytest.ini_options]
136125
addopts = ["-rfEX", "-pno:warnings"]
137126
python_files = ["test_*.py", "*_test.py"]
138127
testpaths = ["tests"]
139128

140129
[tool.black]
141-
target-version = ["py311"]
142-
line-length = 120
143130
exclude = '''
144131
(
145132
/(
@@ -158,10 +145,10 @@ exclude = '''
158145
| src/openllm/__about__.py
159146
)
160147
'''
148+
line-length = 120
149+
target-version = ["py311"]
161150

162151
[tool.ruff]
163-
target-version = "py311"
164-
line-length = 120
165152
ignore = [
166153
# Allow non-abstract empty methods in abstract base classes
167154
"B027",
@@ -178,6 +165,8 @@ ignore = [
178165
"PLR0913",
179166
"PLR0915",
180167
]
168+
line-length = 120
169+
target-version = "py311"
181170
unfixable = [
182171
"F401", # Don't touch unused imports, just warn about it.
183172
]
@@ -186,8 +175,8 @@ unfixable = [
186175
convention = "google"
187176

188177
[tool.ruff.isort]
189-
lines-after-imports = 2
190178
known-first-party = ["openllm", "bentoml", 'transformers']
179+
lines-after-imports = 2
191180

192181
[tool.ruff.flake8-quotes]
193182
inline-quotes = "single"
@@ -197,31 +186,31 @@ ban-relative-imports = "all"
197186

198187
[tool.ruff.per-file-ignores]
199188
# Tests can use magic values, assertions, and relative imports
200-
"tests/**/*" = ["PLR2004", "S101", "TID252"]
201189
"__init__.py" = ["E402", "F401", "F403", "F811"]
190+
"tests/**/*" = ["PLR2004", "S101", "TID252"]
202191

203192
[tool.pyright]
204-
pythonVersion = "3.11"
205-
include = ["src/", "tests/"]
206193
analysis.useLibraryCodeForTypes = true
207-
typeCheckingMode = "strict"
208-
strictListInference = true
209-
strictDictionaryInference = true
210-
strictSetInference = true
211-
strictParameterNoneValue = true
212194
enableTypeIgnoreComments = true
195+
include = ["src/", "tests/"]
196+
pythonVersion = "3.11"
213197
reportMissingImports = "none"
214-
reportMissingTypeStubs = "warning"
215198
reportMissingModuleSource = "warning"
216-
reportUnknownVariableType = "warning"
199+
reportMissingTypeStubs = "warning"
217200
reportUnknownMemberType = "warning"
201+
reportUnknownVariableType = "warning"
202+
strictDictionaryInference = true
203+
strictListInference = true
204+
strictParameterNoneValue = true
205+
strictSetInference = true
206+
typeCheckingMode = "strict"
218207

219208

220209
[tool.coverage.run]
221-
source_pkgs = ["openllm", "tests"]
222210
branch = true
223-
parallel = true
224211
omit = ["src/openllm/__about__.py"]
212+
parallel = true
213+
source_pkgs = ["openllm", "tests"]
225214

226215
[tool.coverage.paths]
227216
openllm = ["src/openllm", "*/openllm/src/openllm"]

src/openllm/_configuration.py

+5-7
Original file line numberDiff line numberDiff line change
@@ -127,7 +127,7 @@ class GenerationConfig(pydantic.BaseModel):
127127
"""Generation config provides the configuration to then be parsed to ``transformers.GenerationConfig``,
128128
with some additional validation and environment constructor.
129129
130-
Note that we always set `do_sample=True` and `return_dict_in_generate=False`
130+
Note that we always set `do_sample=True`
131131
"""
132132

133133
# NOTE: parameters for controlling the length of the output
@@ -146,12 +146,10 @@ class GenerationConfig(pydantic.BaseModel):
146146
early_stopping: bool = pydantic.Field(
147147
False,
148148
description="""Controls the stopping condition for beam-based methods, like beam-search. It accepts the
149-
following values:
150-
- `True`, where the generation stops as soon as there are `num_beams` complete candidates;
151-
- `False`, where a heuristic is applied and the generation stops when it is very unlikely to find
152-
better candidates;
153-
- `"never"`, where the beam search procedure only stops when there cannot be better candidates
154-
(canonical beam search algorithm)
149+
following values: `True`, where the generation stops as soon as there are `num_beams` complete candidates;
150+
`False`, where a heuristic is applied and the generation stops when it is very unlikely to find
151+
better candidates; `"never"`, where the beam search procedure only stops when there
152+
cannot be better candidates (canonical beam search algorithm)
155153
""",
156154
)
157155
max_time: float = pydantic.Field(

src/openllm/cli.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -497,7 +497,7 @@ def cli():
497497
@cli.command(name="version")
498498
@output_decorator
499499
def version(output: t.Literal["json", "pretty", "porcelain"]):
500-
"""Return current OpenLLM version."""
500+
"""🚀 OpenLLM version."""
501501
if output == "pretty":
502502
_console.print(f"OpenLLM version: {openllm.__version__}")
503503
elif output == "json":

taplo.toml

+7
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,7 @@
1+
include = ['*.toml']
2+
3+
[formatting]
4+
align_entries = false
5+
column_width = 120
6+
indent_string = " "
7+
reorder_keys = true

0 commit comments

Comments (0)