48
48
script : |
49
49
conda create --yes --quiet -n test python=3.8
50
50
conda activate test
51
- python3 -m pip install --extra-index-url https://download.pytorch.org/whl/nightly/cpu --pre torch
51
+ python3 -m pip install --index-url https://download.pytorch.org/whl/nightly/cpu --pre torch
52
52
# Can import pytorch
53
53
python3 -c 'import torch'
54
54
test-gpu :
@@ -57,24 +57,24 @@ jobs:
57
57
matrix :
58
58
runner_type : ["linux.4xlarge.nvidia.gpu", "linux.g5.4xlarge.nvidia.gpu"]
59
59
with :
60
- job-name : " linux-py3.8-cu116 "
60
+ job-name : " linux-py3.8-cu121 "
61
61
runner : ${{ matrix.runner_type }}
62
62
test-infra-repository : ${{ github.repository }}
63
63
test-infra-ref : ${{ github.ref }}
64
64
submodules : ${{ 'true' }}
65
65
gpu-arch-type : cuda
66
- gpu-arch-version : " 11.6 "
66
+ gpu-arch-version : " 12.1 "
67
67
timeout : 60
68
68
script : |
69
69
conda create --yes --quiet -n test python=3.8
70
70
conda activate test
71
- python3 -m pip install --extra-index-url https://download.pytorch.org/whl/nightly/cu116 --pre torch
71
+ python3 -m pip install --index-url https://download.pytorch.org/whl/nightly/cu121 --pre torch
72
72
# Can import pytorch, cuda is available
73
73
python3 -c 'import torch;cuda_avail = torch.cuda.is_available();print("CUDA available: " + str(cuda_avail));assert(cuda_avail)'
74
74
python3 -c 'import torch;t = torch.ones([2,2], device="cuda:0");print(t);print("tensor device:" + str(t.device))'
75
75
nvidia-smi
76
- nvcc --version | grep "cuda_11.6 "
77
- [[ "${CUDA_HOME}" == "/usr/local/cuda-11.6 " ]] || exit 1
76
+ nvcc --version | grep "cuda_12.1 "
77
+ [[ "${CUDA_HOME}" == "/usr/local/cuda-12.1 " ]] || exit 1
78
78
test-docker-image :
79
79
uses : ./.github/workflows/linux_job.yml
80
80
with :
0 commit comments