Commit 1b61d15

ci: Uncap torch, allowing it to bump to 2.7.1 in constraints
This also brings newer CUDA into CI.

Signed-off-by: Ihar Hrachyshka <[email protected]>
1 parent 0c738f3 commit 1b61d15
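
For context, constraints files like the one updated below are consumed by pip together with the requirements files referenced in the diff (e.g. requirements.txt, requirements-dev.txt), so the resolver honors these exact pins. A minimal sketch of that flow in Python, assuming the default file names; the actual CI invocation is not part of this commit:

    # Hypothetical helper; the real CI job and its flags are not shown in this commit.
    # File names are taken from the "via" annotations in the diff below.
    import subprocess
    import sys

    def install_with_constraints(requirements: str = "requirements-dev.txt",
                                 constraints: str = "constraints-dev.txt") -> None:
        """Install requirements while honoring the pinned constraints file."""
        subprocess.run(
            [sys.executable, "-m", "pip", "install",
             "-r", requirements,   # unpinned dependency list
             "-c", constraints],   # exact pins, e.g. torch==2.7.1 after this commit
            check=True,
        )

    if __name__ == "__main__":
        install_with_constraints()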

File tree: 2 files changed (+26, -34 lines changed)

  constraints-dev.txt
  constraints-dev.txt.in


constraints-dev.txt

Lines changed: 26 additions & 25 deletions
@@ -17,7 +17,7 @@ babel==2.17.0 # via jupyterlab-server
 beautifulsoup4==4.13.4 # via nbconvert
 bitsandbytes==0.46.0 # via -r requirements-cuda.txt
 bleach==6.2.0 # via nbconvert
-cachetools==6.0.0 # via tox
+cachetools==6.1.0 # via tox
 certifi==2025.6.15 # via httpcore, httpx, requests, sentry-sdk
 cffi==1.17.1 # via argon2-cffi-bindings
 cfgv==3.4.0 # via pre-commit
@@ -39,7 +39,7 @@ einops==0.8.1 # via deepspeed, flash-attn
 executing==2.2.0 # via stack-data
 fastjsonschema==2.21.1 # via nbformat
 filelock==3.18.0 # via datasets, huggingface-hub, torch, tox, transformers, virtualenv
-flash-attn==2.7.4.post1 # via -r requirements-cuda.txt, -r requirements-rocm.txt
+flash-attn==2.8.0.post2 # via -r requirements-cuda.txt, -r requirements-rocm.txt
 fonttools==4.58.4 # via matplotlib
 fqdn==1.5.1 # via jsonschema
 frozenlist==1.7.0 # via aiohttp, aiosignal
@@ -48,7 +48,7 @@ gitdb==4.0.12 # via gitpython
 gitpython==3.1.44 # via wandb
 grpcio==1.73.0 # via tensorboard
 h11==0.16.0 # via httpcore
-hf-xet==1.1.3 # via huggingface-hub
+hf-xet==1.1.4 # via huggingface-hub
 hjson==3.1.0 # via deepspeed
 httpcore==1.0.9 # via httpx
 httpx==0.28.1 # via jupyterlab
@@ -94,9 +94,9 @@ mdurl==0.1.2 # via markdown-it-py
 mistune==3.1.3 # via nbconvert
 mpmath==1.3.0 # via sympy
 msgpack==1.1.1 # via deepspeed
-multidict==6.4.4 # via aiohttp, yarl
+multidict==6.5.0 # via aiohttp, yarl
 multiprocess==0.70.16 # via datasets
-mypy==1.16.0 # via -r requirements-dev.txt
+mypy==1.16.1 # via -r requirements-dev.txt
 mypy-extensions==1.1.0 # via mypy
 nbclient==0.10.2 # via nbconvert
 nbconvert==7.16.6 # via jupyter, jupyter-server
@@ -109,19 +109,20 @@ notebook==7.4.3 # via jupyter
 notebook-shim==0.2.4 # via jupyterlab, notebook
 numba==0.61.2 # via -r requirements.txt
 numpy==1.26.4 # via accelerate, bitsandbytes, contourpy, datasets, deepspeed, matplotlib, numba, pandas, peft, tensorboard, transformers, -r requirements-dev.txt, -r requirements.txt
-nvidia-cublas-cu12==12.4.5.8 # via nvidia-cudnn-cu12, nvidia-cusolver-cu12, torch
-nvidia-cuda-cupti-cu12==12.4.127 # via torch
-nvidia-cuda-nvrtc-cu12==12.4.127 # via torch
-nvidia-cuda-runtime-cu12==12.4.127 # via torch
-nvidia-cudnn-cu12==9.1.0.70 # via torch
-nvidia-cufft-cu12==11.2.1.3 # via torch
-nvidia-curand-cu12==10.3.5.147 # via torch
-nvidia-cusolver-cu12==11.6.1.9 # via torch
-nvidia-cusparse-cu12==12.3.1.170 # via nvidia-cusolver-cu12, torch
-nvidia-cusparselt-cu12==0.6.2 # via torch
-nvidia-nccl-cu12==2.21.5 # via torch
-nvidia-nvjitlink-cu12==12.4.127 # via nvidia-cusolver-cu12, nvidia-cusparse-cu12, torch
-nvidia-nvtx-cu12==12.4.127 # via torch
+nvidia-cublas-cu12==12.6.4.1 # via nvidia-cudnn-cu12, nvidia-cusolver-cu12, torch
+nvidia-cuda-cupti-cu12==12.6.80 # via torch
+nvidia-cuda-nvrtc-cu12==12.6.77 # via torch
+nvidia-cuda-runtime-cu12==12.6.77 # via torch
+nvidia-cudnn-cu12==9.5.1.17 # via torch
+nvidia-cufft-cu12==11.3.0.4 # via torch
+nvidia-cufile-cu12==1.11.1.6 # via torch
+nvidia-curand-cu12==10.3.7.77 # via torch
+nvidia-cusolver-cu12==11.7.1.2 # via torch
+nvidia-cusparse-cu12==12.5.4.2 # via nvidia-cusolver-cu12, torch
+nvidia-cusparselt-cu12==0.6.3 # via torch
+nvidia-nccl-cu12==2.26.2 # via torch
+nvidia-nvjitlink-cu12==12.6.85 # via nvidia-cufft-cu12, nvidia-cusolver-cu12, nvidia-cusparse-cu12, torch
+nvidia-nvtx-cu12==12.6.77 # via torch
 overrides==7.7.0 # via jupyter-server
 packaging==25.0 # via accelerate, datasets, deepspeed, huggingface-hub, ipykernel, jupyter-events, jupyter-server, jupyterlab, jupyterlab-server, matplotlib, nbconvert, peft, pyproject-api, pytest, tensorboard, tox, transformers, wandb, -r requirements.txt
 pandas==2.3.0 # via datasets
@@ -152,7 +153,7 @@ pylint-plugin-utils==0.8.2 # via pylint-pydantic
 pylint-pydantic==0.3.5 # via -r requirements-dev.txt
 pyparsing==3.2.3 # via matplotlib
 pyproject-api==1.9.1 # via tox
-pytest==8.4.0 # via -r requirements-dev.txt
+pytest==8.4.1 # via -r requirements-dev.txt
 python-dateutil==2.9.0.post0 # via arrow, jupyter-client, matplotlib, pandas
 python-json-logger==3.3.0 # via jupyter-events
 pytz==2025.2 # via pandas
@@ -165,32 +166,32 @@ rfc3339-validator==0.1.4 # via jsonschema, jupyter-events
 rfc3986-validator==0.1.1 # via jsonschema, jupyter-events
 rich==14.0.0 # via -r requirements.txt
 rpds-py==0.25.1 # via jsonschema, referencing
-ruff==0.11.13 # via -r requirements-dev.txt
+ruff==0.12.0 # via -r requirements-dev.txt
 safetensors==0.5.3 # via accelerate, instructlab-dolomite, peft, transformers
 send2trash==1.8.3 # via jupyter-server
 sentry-sdk==2.30.0 # via wandb
 setproctitle==1.3.6 # via wandb
-setuptools==80.9.0 # via jupyterlab, tensorboard
+setuptools==80.9.0 # via jupyterlab, tensorboard, triton
 six==1.17.0 # via python-dateutil, rfc3339-validator, tensorboard
 smmap==5.0.2 # via gitdb
 sniffio==1.3.1 # via anyio
 soupsieve==2.7 # via beautifulsoup4
 stack-data==0.6.3 # via ipython
-sympy==1.13.1 # via torch
+sympy==1.14.0 # via torch
 tensorboard==2.19.0 # via -r requirements-dev.txt
 tensorboard-data-server==0.7.2 # via tensorboard
 terminado==0.18.1 # via jupyter-server, jupyter-server-terminals
 tinycss2==1.4.0 # via bleach
 tokenizers==0.21.1 # via transformers
 tomlkit==0.13.3 # via pylint
-torch==2.6.0 # via accelerate, bitsandbytes, deepspeed, flash-attn, instructlab-dolomite, liger-kernel, peft, -c constraints-dev.txt.in, -r requirements.txt
+torch==2.7.1 # via accelerate, bitsandbytes, deepspeed, flash-attn, instructlab-dolomite, liger-kernel, peft, -r requirements.txt
 tornado==6.5.1 # via ipykernel, jupyter-client, jupyter-server, jupyterlab, notebook, terminado
-tox==4.26.0 # via tox-current-env, -r requirements-dev.txt
+tox==4.27.0 # via tox-current-env, -r requirements-dev.txt
 tox-current-env==0.0.16 # via -r requirements-dev.txt
 tqdm==4.67.1 # via datasets, deepspeed, huggingface-hub, peft, transformers
 traitlets==5.14.3 # via comm, ipykernel, ipython, ipywidgets, jupyter-client, jupyter-console, jupyter-core, jupyter-events, jupyter-server, jupyterlab, matplotlib-inline, nbclient, nbconvert, nbformat
 transformers==4.52.4 # via instructlab-dolomite, peft, trl, -r requirements.txt
-triton==3.2.0 # via liger-kernel, torch
+triton==3.3.1 # via liger-kernel, torch
 trl==0.18.2 # via -r requirements.txt
 types-python-dateutil==2.9.0.20250516 # via arrow
 types-pyyaml==6.0.12.20250516 # via -r requirements-dev.txt
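
After the bump above, a quick sanity check one might run in the CI environment to confirm that the torch 2.7.1 / CUDA 12.6 wheels pinned in this file resolved together (illustrative only, not part of the commit):

    # Verify the installed torch matches the new pins and was built against CUDA 12.6.
    import torch

    print("torch:", torch.__version__)             # expected to start with 2.7.1
    print("built with CUDA:", torch.version.cuda)  # expected 12.6 for the cu12 wheels above
    print("CUDA available:", torch.cuda.is_available())

    assert torch.__version__.startswith("2.7"), "constraints should now resolve torch to 2.7.x"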

constraints-dev.txt.in

Lines changed: 0 additions & 9 deletions
@@ -1,10 +1 @@
 # SPDX-License-Identifier: Apache-2.0
-
-# These are synchronized with instructlab repo; we have to keep them in sync at
-# least until we no longer tie training repo CI with ilab repo through e2e jobs.
-torch<2.7.0
-vllm<0.9.0
-
-# flash-attn 2.8.0+ is broken for torch 2.6.x.
-# See: https://github.com/Dao-AILab/flash-attention/issues/1717
-flash-attn<2.8.0
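
With torch uncapped and now resolving to 2.7.1, the flash-attn<2.8.0 cap is no longer needed: per the issue linked above, it only guarded against flash-attn 2.8.0+ on torch 2.6.x. An illustrative encoding of that rule, assuming the packaging library (not part of the commit):

    # Encode the compatibility rule the removed cap guarded against:
    # flash-attn >= 2.8.0 is only known-broken on torch 2.6.x (see the issue above).
    from packaging.version import Version

    def combo_is_known_broken(torch_ver: str, flash_attn_ver: str) -> bool:
        t, f = Version(torch_ver), Version(flash_attn_ver)
        return (t.major, t.minor) == (2, 6) and f >= Version("2.8.0")

    print(combo_is_known_broken("2.6.0", "2.8.0.post2"))  # True  -> cap was required
    print(combo_is_known_broken("2.7.1", "2.8.0.post2"))  # False -> cap can be dropped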
