name: CI
on:
push:
pull_request:
schedule:
- cron: '0 3 * * *'
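# For pull requests, a newer push cancels the in-progress run of the same PR; push and scheduled runs key on the unique run_id and are never cancelled.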
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.run_id }}
cancel-in-progress: true
env:
THREADS: 2
CONFIG: RelWithDebInfo
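  # Single shared dependency install command, replayed in every job via "eval $VCPKG_INSTALL".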
VCPKG_INSTALL: "vcpkg --version; vcpkg install alpaka fmt tinyobjloader boost-mp11 boost-atomic boost-smart-ptr boost-functional boost-container boost-iostreams[core] catch2 xsimd"
jobs:
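  # Reformat the sources with clang-format 16 and push a fixup commit if anything changed; every other job waits for this one.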
clang-format:
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v3
- uses: DoozyX/[email protected]
with:
exclude: './thirdparty'
extensions: 'cpp,hpp,h,cu'
clangFormatVersion: 16
inplace: True
- name: git diff
run: git diff
- uses: EndBug/add-and-commit@v9
with:
author_name: Third Party
author_email: [email protected]
message: 'Run clang-format'
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
clang-tidy:
needs: clang-format
runs-on: ubuntu-22.04
env:
CXX: clang++-16
strategy:
fail-fast: false
matrix:
include:
- files: '../tests/m.*'
- files: '../tests/[^m].*'
- files: '../examples'
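        # The three matrix entries above split the clang-tidy run into parallel batches: test sources starting with "m", the remaining tests, and the examples.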
steps:
- uses: actions/checkout@v3
- name: add LLVM apt repo
run: |
          wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add -
sudo add-apt-repository 'deb http://apt.llvm.org/jammy/ llvm-toolchain-jammy-16 main'
- name: install clang-16
run: |
sudo apt install clang-16 libomp-16-dev clang-tidy-16
- name: vcpkg install dependencies
run: |
eval $VCPKG_INSTALL
- name: cmake
run: |
mkdir build
cd build
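          # Configure only; clang-tidy consumes the compile_commands.json written by CMAKE_EXPORT_COMPILE_COMMANDS, so no build is needed.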
          cmake .. -DCMAKE_BUILD_TYPE=$CONFIG \
            -DBUILD_TESTING=ON \
            -DLLAMA_BUILD_EXAMPLES=ON \
-DCMAKE_EXPORT_COMPILE_COMMANDS=ON \
-Dalpaka_ACC_CPU_B_SEQ_T_SEQ_ENABLE=ON \
-Dalpaka_ACC_CPU_DISABLE_ATOMIC_REF=ON \
-Dalpaka_CXX_STANDARD=17 \
-DCMAKE_TOOLCHAIN_FILE=$VCPKG_INSTALLATION_ROOT/scripts/buildsystems/vcpkg.cmake
- name: run clang-tidy
run: |
cd build
sed -i 's/\(-forward-unknown-to-host-compiler\|--generate-code=arch=[^ ]\+\|--expt-extended-lambda\|--extended-lambda\|--expt-relaxed-constexpr\|--use_fast_math\)//g' compile_commands.json # remove NVCC specific flags which clang cannot handle
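          # The -extra-arg options keep clang-tidy from failing on CUDA-specific flags and the missing CUDA toolkit on the runner.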
run-clang-tidy-16 -j $THREADS -header-filter='(tests|include/llama|examples)' -extra-arg=--no-cuda-version-check -extra-arg=-nocudalib -extra-arg=-Wno-unused-command-line-argument ${{ matrix.files }}
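  # Debug build of the tests with coverage instrumentation; the lcov capture is uploaded to Codecov.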
coverage:
needs: clang-format
runs-on: ubuntu-22.04
env:
CXX: g++
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: install lcov
run: |
sudo apt install lcov
- name: vcpkg install dependencies
run: |
eval $VCPKG_INSTALL
- name: cmake
run: |
mkdir build
cd build
cmake .. -DCMAKE_BUILD_TYPE=Debug \
-DBUILD_TESTING=ON \
-DLLAMA_BUILD_EXAMPLES=OFF \
-DLLAMA_ENABLE_COVERAGE_FOR_TESTS=ON \
            -DCMAKE_TOOLCHAIN_FILE=$VCPKG_INSTALLATION_ROOT/scripts/buildsystems/vcpkg.cmake
- name: build tests
run: |
cmake --build build -j $THREADS
- name: run tests
run: |
build/tests
- name: generate coverage report
run: |
lcov --capture --directory build --output-file coverage.info
#lcov --remove coverage.info '/usr/*' --output-file coverage.info
#lcov --list coverage.info
- name: upload coverage report
uses: codecov/codecov-action@v3
with:
token: ${{ secrets.CODECOV_TOKEN }}
fail_ci_if_error: true
verbose: true
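  # Check that the generated single-header llama.hpp is self-contained by compiling one example against it.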
amalgamation:
needs: clang-format
runs-on: ubuntu-22.04
env:
CXX: g++
steps:
- uses: actions/checkout@v3
- name: create llama.hpp
run: ./tools/create-single-header.sh
- name: vcpkg install dependencies
run: |
eval $VCPKG_INSTALL
- name: test llama.hpp
run: |
mkdir build
cd build
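          # Stage the single header as llama/llama.hpp so the example's llama includes resolve via -I.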
mkdir llama
cp -p ../single-header/llama.hpp llama
$CXX -std=c++20 -I$VCPKG_INSTALLATION_ROOT/installed/x64-linux/include -I. ../examples/heatequation/heatequation.cpp
- name: upload llama.hpp
uses: actions/upload-artifact@v3
with:
name: llama.hpp
path: single-header/llama.hpp
if-no-files-found: error
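  # Main Linux build matrix: GCC 9-13, Clang 10-16, icpx and nvc++, with several entries paired with a CUDA toolkit installed from the runfile in cuda_url.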
build-ubuntu:
needs: clang-format
runs-on: ${{ matrix.os || 'ubuntu-22.04' }}
env:
CXX: ${{ matrix.cxx }}
CUDACXX: ${{ matrix.cudacxx }}
name: ${{ matrix.name }}
strategy:
fail-fast: false
matrix:
include:
- name: build-ubuntu-gcc9
cxx: g++-9
- name: build-ubuntu-gcc10
cxx: g++-10
- name: build-ubuntu-gcc10-nvcc11.4
cxx: g++-10
cuda_url: https://developer.download.nvidia.com/compute/cuda/11.4.4/local_installers/cuda_11.4.4_470.82.01_linux.run
- name: build-ubuntu-gcc10-nvcc11.5
cxx: g++-10
cuda_url: https://developer.download.nvidia.com/compute/cuda/11.5.2/local_installers/cuda_11.5.2_495.29.05_linux.run
- name: build-ubuntu-gcc10-nvcc11.6
cxx: g++-10
cuda_url: https://developer.download.nvidia.com/compute/cuda/11.6.2/local_installers/cuda_11.6.2_510.47.03_linux.run
- name: build-ubuntu-gcc11
cxx: g++-11
- name: build-ubuntu-gcc11-nvcc11.7
cxx: g++-11
cuda_url: https://developer.download.nvidia.com/compute/cuda/11.7.1/local_installers/cuda_11.7.1_515.65.01_linux.run
- name: build-ubuntu-gcc11-nvcc11.8
cxx: g++-11
cuda_url: https://developer.download.nvidia.com/compute/cuda/11.8.0/local_installers/cuda_11.8.0_520.61.05_linux.run
- name: build-ubuntu-gcc12
cxx: g++-12
install_extra: g++-12
- name: build-ubuntu-gcc12-nvcc12.0
cxx: g++-12
install_extra: g++-12
cuda_url: https://developer.download.nvidia.com/compute/cuda/12.0.1/local_installers/cuda_12.0.1_525.85.12_linux.run
- name: build-ubuntu-gcc12-nvcc12.1
cxx: g++-12
install_extra: g++-12
cuda_url: https://developer.download.nvidia.com/compute/cuda/12.1.1/local_installers/cuda_12.1.1_530.30.02_linux.run
- name: build-ubuntu-gcc12-nvcc12.2
cxx: g++-12
install_extra: g++-12
cuda_url: https://developer.download.nvidia.com/compute/cuda/12.2.1/local_installers/cuda_12.2.1_535.86.10_linux.run
- name: build-ubuntu-gcc13
cxx: g++-13
add_toolchain_repo: true
install_extra: g++-13
- name: build-ubuntu-clang10
os: ubuntu-20.04 # clang-10 is not available on ubuntu-22.04
cxx: clang++-10
install_extra: clang-10 libomp-10-dev
- name: build-ubuntu-clang11
cxx: clang++-11
asan: OFF # ERROR: AddressSanitizer failed to allocate 0x0 (0) bytes of SetAlternateSignalStack (error code: 22)
install_extra: clang-11 libomp-11-dev
- name: build-ubuntu-clang12
cxx: clang++-12
install_extra: clang-12 libomp-12-dev
- name: build-ubuntu-clang13
cxx: clang++-13
install_extra: clang-13 libomp-13-dev
- name: build-ubuntu-clang14
cxx: clang++-14
install_extra: clang-14 libomp-14-dev
- name: build-ubuntu-clang15
cxx: clang++-15
install_extra: clang-15 libomp-15-dev
add_llvm_repo: true
- name: build-ubuntu-clang16
cxx: clang++-16
install_extra: clang-16 libomp-16-dev
add_llvm_repo: true
          - name: build-ubuntu-clang16-cuda12.1
cxx: clang++-16
cudacxx: clang++-16
install_extra: clang-16 libomp-16-dev
cuda_url: https://developer.download.nvidia.com/compute/cuda/12.1.0/local_installers/cuda_12.1.0_530.30.02_linux.run
add_llvm_repo: true
cxx_std: 20
- name: build-ubuntu-icpx
cxx: icpx
add_oneapi_repo: true
install_extra: intel-oneapi-compiler-dpcpp-cpp g++-12 # for libstdc++
cxx_flags: --gcc-install-dir=/usr/lib/gcc/x86_64-linux-gnu/12
- name: build-ubuntu-nvc++
cxx: /opt/nvidia/hpc_sdk/Linux_x86_64/23.5/compilers/bin/nvc++
add_nvcpp_repo: true
install_extra: nvhpc-23-5
steps:
- uses: actions/checkout@v3
- run: lscpu
- name: add ubuntu toolchain repo
if: matrix.add_toolchain_repo
run: |
sudo add-apt-repository ppa:ubuntu-toolchain-r/ppa
sudo apt update
- name: add LLVM apt repo
if: matrix.add_llvm_repo
run: |
          wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add -
sudo add-apt-repository 'deb http://apt.llvm.org/jammy/ llvm-toolchain-jammy-12 main'
sudo add-apt-repository 'deb http://apt.llvm.org/jammy/ llvm-toolchain-jammy-13 main'
sudo add-apt-repository 'deb http://apt.llvm.org/jammy/ llvm-toolchain-jammy-14 main'
sudo add-apt-repository 'deb http://apt.llvm.org/jammy/ llvm-toolchain-jammy-15 main'
sudo add-apt-repository 'deb http://apt.llvm.org/jammy/ llvm-toolchain-jammy-16 main'
      - name: add NVIDIA HPC SDK apt repo
if: matrix.add_nvcpp_repo
run: |
echo 'deb [trusted=yes] https://developer.download.nvidia.com/hpc-sdk/ubuntu/amd64 /' | sudo tee /etc/apt/sources.list.d/nvhpc.list
sudo apt update
      - name: add Intel oneAPI apt repo
if: matrix.add_oneapi_repo
run: |
wget https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB
sudo apt-key add GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB
sudo add-apt-repository "deb https://apt.repos.intel.com/oneapi all main"
sudo apt update
- name: install extras
if: ${{ matrix.install_extra }}
run: |
sudo apt install ${{ matrix.install_extra }}
- name: vcpkg install dependencies
run: |
# vcpkg fails to build with Intel or nvhpc compilers
if [ ${{ matrix.add_oneapi_repo }} ] || [ ${{ matrix.add_nvcpp_repo }} ]; then unset CXX; fi
eval $VCPKG_INSTALL
- name: download CUDA
if: matrix.cuda_url
run: |
wget --no-verbose -O cuda_installer.run ${{ matrix.cuda_url }}
- name: install CUDA
if: matrix.cuda_url
run: |
sudo sh cuda_installer.run --silent --toolkit --override
- name: cmake
run: |
if [ ${{ matrix.add_oneapi_repo }} ]; then source /opt/intel/oneapi/setvars.sh; fi
mkdir build
cd build
# try to find nvcc if no CUDACXX is provided
if [ -z "$CUDACXX" ]; then
CUDACXX=(`echo /usr/local/cuda-*/bin/nvcc`)
if [ ! -f $CUDACXX ]; then
unset CUDACXX
fi
fi
echo "CUDACXX is here: $CUDACXX"
CXX_FLAGS=${{ matrix.cxx_flags }}
if [ ${{ matrix.add_nvcpp_repo }} ]; then
# cmake (in some versions) passes some flags that nvc++ does not understand
CXX_FLAGS+=" -noswitcherror"
fi
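          # Matrix entries with a CUDA toolkit build the alpaka GPU backend; all others build the sequential CPU backend.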
cmake .. -DBUILD_TESTING=ON \
-DLLAMA_BUILD_EXAMPLES=ON \
-DCMAKE_BUILD_TYPE=$CONFIG \
-DLLAMA_ENABLE_ASAN_FOR_TESTS=${{ matrix.asan || 'ON' }} \
-Dalpaka_ACC_CPU_B_SEQ_T_SEQ_ENABLE=${{ !matrix.cuda_url }} \
-Dalpaka_ACC_CPU_DISABLE_ATOMIC_REF=ON \
-Dalpaka_ACC_GPU_CUDA_ENABLE=${{ !!matrix.cuda_url }} \
-Dalpaka_CXX_STANDARD=${{ matrix.cxx_std || '17' }} \
-DCMAKE_CUDA_COMPILER=$CUDACXX \
-DCMAKE_CUDA_HOST_COMPILER=$CXX \
-DCMAKE_CXX_FLAGS="$CXX_FLAGS" \
-DCMAKE_CUDA_FLAGS="${{ matrix.cuda_flags }}" \
-DCMAKE_TOOLCHAIN_FILE=$VCPKG_INSTALLATION_ROOT/scripts/buildsystems/vcpkg.cmake
- name: build tests + examples
run: |
if [ ${{ matrix.add_oneapi_repo }} ]; then source /opt/intel/oneapi/setvars.sh; fi
cmake --build build -j $THREADS
- name: run tests
run: |
if [ ${{ matrix.add_oneapi_repo }} ]; then source /opt/intel/oneapi/setvars.sh; fi
build/tests
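  # MSVC build; the Visual Studio generator is multi-config, so the binaries end up under build/$CONFIG.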
build-windows:
needs: clang-format
runs-on: ${{ matrix.runs-on }}
env:
VCPKG_DEFAULT_TRIPLET: x64-windows
name: ${{ matrix.name }}
strategy:
fail-fast: false
matrix:
include:
- name: build-windows-VS2022
runs-on: windows-2022
defaults:
run:
shell: bash
steps:
- uses: actions/checkout@v3
- name: vcpkg install dependencies
run: |
eval $VCPKG_INSTALL
- name: cmake
run: |
mkdir build
cd build
cmake .. -DBUILD_TESTING=ON \
-DLLAMA_BUILD_EXAMPLES=ON \
-Dalpaka_ACC_CPU_B_SEQ_T_SEQ_ENABLE=ON \
-DCMAKE_TOOLCHAIN_FILE="$VCPKG_INSTALLATION_ROOT/scripts/buildsystems/vcpkg.cmake"
- name: build tests + examples
run: cmake --build build -j $THREADS --config $CONFIG
- name: run tests
run: |
build/$CONFIG/tests
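  # macOS builds install LLVM and libomp from Homebrew and compile with the brewed clang++ instead of Apple clang.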
build-macos:
needs: clang-format
runs-on: ${{ matrix.os }}
name: build-${{ matrix.os }}
strategy:
fail-fast: false
matrix:
include:
- os: macos-12
- os: macos-13
steps:
- uses: actions/checkout@v3
- name: brew install dependencies
run: |
brew install llvm libomp pkg-config
echo "CXX is here: $(brew --prefix llvm)/bin/clang++"
- name: vcpkg install dependencies
run: |
eval $VCPKG_INSTALL
- name: cmake
run: |
mkdir build
cd build
export CXX="$(brew --prefix llvm)/bin/clang++"
cmake .. -DBUILD_TESTING=ON \
-DLLAMA_BUILD_EXAMPLES=ON \
-DCMAKE_BUILD_TYPE=$CONFIG \
-Dalpaka_ACC_CPU_B_SEQ_T_SEQ_ENABLE=ON \
-Dalpaka_CXX_STANDARD=17 \
-DCMAKE_TOOLCHAIN_FILE=$VCPKG_INSTALLATION_ROOT/scripts/buildsystems/vcpkg.cmake
- name: build tests + examples
run: |
cmake --build build -j $THREADS
- name: run tests
run: |
build/tests