Optimum TPU / Test TGI on TPU (slow tests) #146

name: Optimum TPU / Test TGI on TPU (slow tests)

on:
  # This can be used to automatically publish nightlies at UTC nighttime
  schedule:
    - cron: '0 2 * * *' # run at 2 AM UTC
  # This can be used to allow manually triggering nightlies from the web interface
  workflow_dispatch:

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  do-the-job:
    name: Build and Run slow tests
    runs-on: optimum-tpu
    container:
      # Use a nightly image that works with TPU (release was not working)
      image: us-central1-docker.pkg.dev/tpu-pytorch-releases/docker/xla:r2.4.0_3.10_tpuvm
      options: --shm-size "16gb" --ipc host --privileged
    env:
      PJRT_DEVICE: TPU
      HF_TOKEN: ${{ secrets.HF_TOKEN_OPTIMUM_TPU_CI }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Build and test Optimum TPU (also slow tests)
        run: |
          python -m pip install build
          make build_dist test_installs
          python -m pytest --runslow -sv tests
      - name: Build and test TGI (also slow tests)
        run: |
          make tgi_server test_installs
          find text-generation-inference/ -name "text_generation_server-*whl" -exec python -m pip install {} \;
          python -m pytest --runslow -sv text-generation-inference/tests
      # Use a different step to test the Jetstream Pytorch version, to avoid conflicts with torch-xla[tpu]
      - name: Install and test TGI server slow tests (Jetstream Pytorch)
        run: |
          pip install -U .[jetstream-pt] \
            -f https://storage.googleapis.com/jax-releases/jax_nightly_releases.html \
            -f https://storage.googleapis.com/jax-releases/jaxlib_nightly_releases.html \
            -f https://storage.googleapis.com/libtpu-releases/index.html
          JETSTREAM_PT=1 HF_TOKEN=${{ secrets.HF_TOKEN_OPTIMUM_TPU_CI }} python -m \
            pytest -sv text-generation-inference/tests --runslow -k jetstream
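
Note on the pytest invocations above: --runslow is not a built-in pytest flag, so the repository's test suite is expected to register it in a conftest.py. The sketch below shows the standard pytest pattern for such a flag (pytest_addoption plus a skip marker for tests marked "slow"); it is only an illustration of how the option is typically wired up, and the actual conftest.py in optimum-tpu may differ.

# conftest.py -- hypothetical sketch of the usual "--runslow" pattern,
# not necessarily the exact implementation used by optimum-tpu.
import pytest

def pytest_addoption(parser):
    # Register the custom command-line flag used by the CI steps above.
    parser.addoption("--runslow", action="store_true", default=False,
                     help="run tests marked as slow")

def pytest_configure(config):
    config.addinivalue_line("markers", "slow: mark test as slow to run")

def pytest_collection_modifyitems(config, items):
    if config.getoption("--runslow"):
        # --runslow given: run everything, including slow tests.
        return
    skip_slow = pytest.mark.skip(reason="need --runslow option to run")
    for item in items:
        if "slow" in item.keywords:
            item.add_marker(skip_slow)

With this pattern, a plain "python -m pytest -sv tests" skips slow tests, while the CI jobs above opt in with --runslow.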