# ci.yml
name: CI
on:
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]
  schedule:
    # Run every Sunday at midnight
    - cron: '0 0 * * 0'
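# bash runs as a login shell so that the conda initialization written by setup-miniconda
# is sourced and "conda activate" works inside run steps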
defaults:
  run:
    shell: bash -l {0}
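# The build job runs once per matrix entry; fail-fast is disabled so one failing
# configuration does not cancel the others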
jobs:
  build:
    name: ${{ matrix.name }}
    runs-on: ubuntu-20.04
    strategy:
      fail-fast: false
      matrix:
        include:
          # Oldest supported versions
          - name: Linux (CUDA 10.2, Python 3.8, PyTorch 1.11)
            enable_cuda: true
            cuda: "10.2.89"
            gcc: "8.5.*"
            nvcc: "10.2"
            python: "3.8.*"
            torchani: "2.2.*"
            pytorch: "1.11.*"
          # Older supported versions
          - name: Linux (CUDA 11.2, Python 3.9, PyTorch 1.12)
            enable_cuda: true
            cuda: "11.2.2"
            gcc: "10.3.*"
            nvcc: "11.2"
            python: "3.9.*"
            torchani: "2.2.*"
            pytorch: "1.12.*"
          # Latest supported versions (with CUDA)
          - name: Linux (CUDA 12.2, Python 3.11, PyTorch 2.0)
            enable_cuda: true
            cuda: "12.2.0"
            gcc: "10.3.*"
            nvcc: ""
            python: "3.11.*"
            torchani: "2.2.*"
            pytorch: "2.0.*"
          # Latest supported versions (without CUDA)
          - name: Linux (no CUDA, Python 3.10, PyTorch 2.0)
            enable_cuda: false
            gcc: "10.3.*"
            python: "3.10.*"
            pytorch: "2.0.*"
            torchani: "2.2.*"
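    # The version fields above are written into environment.yml by the
    # "Prepare dependencies" steps below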
    steps:
      - name: Check out
        uses: actions/checkout@v2
      - name: Install CUDA Toolkit
        # The pinned release tag of Jimver/cuda-toolkit could not be recovered from the source;
        # v0.2.11 is a placeholder, substitute the tag actually used
        uses: Jimver/cuda-toolkit@v0.2.11
        with:
          cuda: ${{ matrix.cuda }}
          linux-local-args: '["--toolkit", "--override"]'
        if: ${{ matrix.enable_cuda }}
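      # Mambaforge ships mamba, which the "Install dependencies" step uses to create the environment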
      - name: Install Miniconda
        uses: conda-incubator/setup-miniconda@v2
        with:
          activate-environment: ""
          auto-activate-base: true
          miniforge-variant: Mambaforge
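      # The sed edits below pin the matrix versions by replacing whole dependency lines
      # in environment.yml (GNU sed's "c\" command)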
      - name: Prepare dependencies (with CUDA)
        if: ${{ matrix.enable_cuda }}
        run: |
          sed -i -e "/cudatoolkit/c\ - cudatoolkit ${{ matrix.cuda }}" \
                 -e "/gxx_linux-64/c\ - gxx_linux-64 ${{ matrix.gcc }}" \
                 -e "/torchani/c\ - torchani ${{ matrix.torchani }}" \
                 -e "/python/c\ - python ${{ matrix.python }}" \
                 -e "/pytorch-gpu/c\ - pytorch-gpu ${{ matrix.pytorch }}" \
                 environment.yml
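      # Without CUDA, the cudatoolkit entry is commented out and pytorch-gpu is swapped for pytorch-cpu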
      - name: Prepare dependencies (without CUDA)
        if: ${{ !matrix.enable_cuda }}
        run: |
          sed -i -e "/cudatoolkit/c\ # - cudatoolkit" \
                 -e "/gxx_linux-64/c\ - gxx_linux-64 ${{ matrix.gcc }}" \
                 -e "/torchani/c\ - torchani ${{ matrix.torchani }}" \
                 -e "/python/c\ - python ${{ matrix.python }}" \
                 -e "/pytorch-gpu/c\ - pytorch-cpu ${{ matrix.pytorch }}" \
                 environment.yml
      - name: Show dependency file
        run: cat environment.yml
      - name: Install dependencies
        run: mamba env create -n nnpops -f environment.yml
        env:
          # Needed to install pytorch-gpu on a machine without a GPU
          CONDA_OVERRIDE_CUDA: ${{ matrix.nvcc }}
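      # The nnpops environment is not activated automatically, so each run step activates it explicitly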
      - name: List conda environment
        run: |
          conda activate nnpops
          conda list
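      # torch.utils.cmake_prefix_path points CMake at the Torch config shipped with the conda-installed PyTorch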
      - name: Configure, compile, and install
        run: |
          conda activate nnpops
          mkdir build && cd build
          cmake .. \
                -DENABLE_CUDA=${{ matrix.enable_cuda }} \
                -DTorch_DIR=$(python -c 'import torch.utils; print(torch.utils.cmake_prefix_path)')/Torch \
                -DCMAKE_INSTALL_PREFIX=$CONDA_PREFIX
          make install
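      # CUDA tests are excluded because the runner has no GPU (see CONDA_OVERRIDE_CUDA above)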
      - name: Test
        run: |
          conda activate nnpops
          cd build
          ctest --verbose --exclude-regex TestCuda