This repository was archived by the owner on Oct 11, 2024. It is now read-only.

make venv input optional #369

Closed · wants to merge 2 commits
11 changes: 0 additions & 11 deletions .github/actions/nm-benchmark/action.yml
@@ -7,12 +7,6 @@ inputs:
   output_directory:
     description: 'output directory to store the benchmark results'
     required: true
-  python:
-    description: 'python version, e.g. 3.10.12'
-    required: true
-  venv:
-    description: 'name for python virtual environment'
-    required: true
 runs:
   using: composite
   steps:
@@ -22,11 +16,6 @@ runs:
         # move source directories
         mv vllm vllm-ignore || echo "no 'vllm' folder to move"
         mv csrc csrc-ignore || echo "no 'csrc' folder to move"
-        if [ ! -z "${{ inputs.venv }}" ]; then
-          COMMIT=${{ github.sha }}
-          VENV="${{ inputs.venv }}-${COMMIT:0:7}"
-          source $(pyenv root)/versions/${{ inputs.python }}/envs/${VENV}/bin/activate
-        fi
         pip3 install -r neuralmagic/benchmarks/requirements-benchmark.txt
         SUCCESS=0
         .github/scripts/nm-run-benchmarks.sh ${{ inputs.benchmark_config_list_file }} ${{ inputs.output_directory }} || SUCCESS=$?
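For reference, the block deleted in the second hunk (and repeated in the other actions below) derived a per-commit virtual-environment name and activated it through pyenv. A standalone bash sketch of that pattern, with hypothetical stand-in values replacing the ${{ ... }} workflow expressions; it assumes pyenv plus pyenv-virtualenv and a venv named "<venv>-<7-char sha>". Note the `[ -n ... ]` form used in nm-lm-eval further down is equivalent to the `[ ! -z ... ]` used here.

  # Sketch of the removed venv-activation logic (bash; values are stand-ins).
  COMMIT="0123456789abcdef0123456789abcdef01234567"  # stand-in for ${{ github.sha }}
  VENV_NAME="bench"                                  # stand-in for ${{ inputs.venv }}
  PYTHON="3.10.12"                                   # stand-in for ${{ inputs.python }}
  if [ -n "${VENV_NAME}" ]; then
    # ${COMMIT:0:7} is bash substring expansion: the 7-character short sha
    VENV="${VENV_NAME}-${COMMIT:0:7}"
    source "$(pyenv root)/versions/${PYTHON}/envs/${VENV}/bin/activate"
  fi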
12 changes: 0 additions & 12 deletions .github/actions/nm-install-whl/action.yml
@@ -1,12 +1,5 @@
 name: install whl
 description: 'installs found whl based on python version into specified venv'
-inputs:
-  python:
-    description: 'python version, e.g. 3.10.12'
-    required: true
-  venv:
-    description: 'name for python virtual environment'
-    required: true
 runs:
   using: composite
   steps:
@@ -16,11 +9,6 @@ runs:
         mv vllm vllm-ignore
         mv csrc csrc-ignore
         # activate and install
-        if [ ! -z "${{ inputs.venv }}" ]; then
-          COMMIT=${{ github.sha }}
-          VENV="${{ inputs.venv }}-${COMMIT:0:7}"
-          source $(pyenv root)/versions/${{ inputs.python }}/envs/${VENV}/bin/activate
-        fi
         pip3 install -r requirements-dev.txt
         WHL=$(find . -type f -iname "nm_vllm*.whl")
         WHL_BASENAME=$(basename ${WHL})
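The hunk is truncated before the wheel is actually used; presumably the step goes on to pip-install the wheel it finds. A standalone bash sketch of the find-and-install pattern, where the final install line is an assumption and not visible in the diff above:

  # locate the built nm-vllm wheel anywhere under the working tree
  WHL=$(find . -type f -iname "nm_vllm*.whl")
  WHL_BASENAME=$(basename "${WHL}")
  echo "found wheel: ${WHL_BASENAME}"
  pip3 install "${WHL}"   # assumed follow-up; the diff cuts off before this point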
12 changes: 0 additions & 12 deletions .github/actions/nm-lm-eval/action.yml
@@ -1,12 +1,6 @@
 name: run lm-eval accuracy test
 description: 'run lm-eval accuracy test'
 inputs:
-  python:
-    description: 'python version, e.g. 3.10.12'
-    required: true
-  venv:
-    description: 'name for python virtual environment'
-    required: true
   lm_eval_configuration:
     description: 'file containing test configuration'
     required: true
@@ -15,12 +9,6 @@ runs:
   steps:
     - id: lm-eval
       run: |
-        if [ -n "${{ inputs.venv }}" ]; then
-          COMMIT=${{ github.sha }}
-          VENV="${{ inputs.venv }}-${COMMIT:0:7}"
-          source $(pyenv root)/versions/${{ inputs.python }}/envs/${VENV}/bin/activate
-        fi
-
         pip3 install git+https://github.com/EleutherAI/lm-evaluation-harness.git@262f879a06aa5de869e5dd951d0ff2cf2f9ba380
         pip3 install pytest openai==1.3.9
 
3 changes: 2 additions & 1 deletion .github/actions/nm-produce-gha-benchmark-json/action.yml
@@ -17,7 +17,8 @@ inputs:
     required: true
   venv:
     description: 'name for python virtual environment'
-    required: true
+    required: false
+    default: ""
 runs:
   using: composite
   steps:
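This file shows the other half of the change: rather than deleting the inputs, this action keeps venv but makes it optional with an empty default, so an omitted input resolves to "" and any non-empty guard keeps working. A minimal sketch of that pattern in a hypothetical composite action (illustration only, not part of this repository; the guard mirrors the `[ -n ... ]` checks used elsewhere in this PR):

  # action.yml of a hypothetical composite action with an optional venv input
  name: optional-venv example
  description: 'illustration only; not part of this repository'
  inputs:
    venv:
      description: 'name for python virtual environment'
      required: false
      default: ""
  runs:
    using: composite
    steps:
      - run: |
          if [ -n "${{ inputs.venv }}" ]; then
            echo "would activate venv '${{ inputs.venv }}' here"
          else
            echo "no venv supplied; using the system python"
          fi
        shell: bash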
6 changes: 0 additions & 6 deletions .github/workflows/nm-benchmark.yml
@@ -120,17 +120,12 @@ jobs:
       - name: install whl
         id: install_whl
         uses: ./.github/actions/nm-install-whl/
-        with:
-          python: ${{ inputs.python }}
-          venv:
 
       - name: run benchmarks
         uses: ./.github/actions/nm-benchmark/
         with:
           benchmark_config_list_file: ${{ inputs.benchmark_config_list_file }}
           output_directory: benchmark-results
-          python: ${{ inputs.python }}
-          venv:
 
       - name: store benchmark result artifacts
         if: success()
@@ -171,7 +166,6 @@ jobs:
           # Metrics that we only want to observe are stored here
           observation_metrics_output_file_path: gh-action-benchmark-jsons/observation_metrics.json
           python: ${{ inputs.python }}
-          venv:
 
       - name: set gh action benchmark input artifact name
         id: set_gh_action_benchmark_input_artifact_name
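Caller side, for contrast: with python and venv removed from nm-install-whl and nm-benchmark, their with: blocks shrink or disappear entirely, while nm-produce-gha-benchmark-json still takes python and simply omits the now-optional venv. Roughly what the first two updated steps look like after this change, reconstructed from the hunks above:

      - name: install whl
        id: install_whl
        uses: ./.github/actions/nm-install-whl/

      - name: run benchmarks
        uses: ./.github/actions/nm-benchmark/
        with:
          benchmark_config_list_file: ${{ inputs.benchmark_config_list_file }}
          output_directory: benchmark-results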
5 changes: 0 additions & 5 deletions .github/workflows/nm-lm-eval.yml
@@ -105,13 +105,8 @@ jobs:
       - name: install whl
         id: install_whl
         uses: ./.github/actions/nm-install-whl/
-        with:
-          python: ${{ inputs.python }}
-          venv:
 
       - name: run lm-eval-accuracy
         uses: ./.github/actions/nm-lm-eval/
         with:
-          python: ${{ inputs.python }}
-          venv:
           lm_eval_configuration: ${{ inputs.lm_eval_configuration }}
3 changes: 0 additions & 3 deletions .github/workflows/nm-test.yml
@@ -122,9 +122,6 @@ jobs:
 
       - name: install whl
         uses: ./.github/actions/nm-install-whl/
-        with:
-          python: ${{ inputs.python }}
-          venv:
 
       - name: run buildkite script
         run: |