Refactor llm perf backend handling #155

name: CLI CUDA vLLM Tests

on:
  workflow_dispatch:
  push:
    branches:
      - main
  pull_request:
    branches:
      - main
    types:
      - opened
      - reopened
      - synchronize
      - labeled
      - unlabeled

concurrency:
  cancel-in-progress: true
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}

jobs:
  run_cli_cuda_vllm_single_gpu_tests:
    if: ${{
      (github.event_name == 'push') ||
      (github.event_name == 'workflow_dispatch') ||
      contains( github.event.pull_request.labels.*.name, 'cli') ||
      contains( github.event.pull_request.labels.*.name, 'cuda') ||
      contains( github.event.pull_request.labels.*.name, 'vllm') ||
      contains( github.event.pull_request.labels.*.name, 'single_gpu') ||
      contains( github.event.pull_request.labels.*.name, 'cli_cuda_vllm_single_gpu')
      }}
    runs-on: [single-gpu, nvidia-gpu, a10, ci]
    container:
      image: vllm/vllm-openai:latest
      options: --ipc host --gpus all --entrypoint /bin/bash
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Install dependencies
        run: |
          pip install -e .[testing]
      - name: Run tests
        run: |
          FORCE_SERIAL=1 pytest -x -s -k "cli and cuda and vllm and not (tp or pp)"

  run_cli_cuda_vllm_multi_gpu_tests:
    if: ${{
      (github.event_name == 'push') ||
      (github.event_name == 'workflow_dispatch') ||
      contains( github.event.pull_request.labels.*.name, 'cli') ||
      contains( github.event.pull_request.labels.*.name, 'cuda') ||
      contains( github.event.pull_request.labels.*.name, 'vllm') ||
      contains( github.event.pull_request.labels.*.name, 'multi_gpu') ||
      contains( github.event.pull_request.labels.*.name, 'cli_cuda_vllm_multi_gpu')
      }}
    runs-on: [multi-gpu, nvidia-gpu, 4-a10, ci]
    container:
      image: vllm/vllm-openai:latest
      options: --ipc host --gpus all --entrypoint /bin/bash
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Install dependencies
        run: |
          pip install -e .[testing]
      - name: Run tests
        run: |
          FORCE_SERIAL=1 pytest -x -s -k "cli and cuda and vllm and (tp or pp)"
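
The two jobs select their tests purely through the pytest -k keyword expression: the single-GPU job excludes any test whose name mentions tensor or pipeline parallelism (tp/pp), while the multi-GPU job runs only those. A minimal sketch of test names that would be routed this way, assuming a naming convention along these lines (the actual test names in the repository may differ):

# Hypothetical test names, for illustration only; pytest's -k expression does
# substring matching against test identifiers.

def test_cli_cuda_vllm_single_device():
    # contains "cli", "cuda", "vllm" but neither "tp" nor "pp"
    # -> selected by the single-GPU job: -k "cli and cuda and vllm and not (tp or pp)"
    ...

def test_cli_cuda_vllm_tp_2():
    # additionally contains "tp"
    # -> selected by the multi-GPU job: -k "cli and cuda and vllm and (tp or pp)"
    ...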