
Commit

Merge branch 'rapidsai:branch-24.12' into b24.12-begin-publish-package
nv-rliu authored Nov 8, 2024
2 parents fc6dfdb + 1fa7c1e commit ddd2bb8
Showing 26 changed files with 308 additions and 64 deletions.
24 changes: 21 additions & 3 deletions .github/workflows/pr.yaml
@@ -15,7 +15,9 @@ jobs:
- changed-files
- checks
- conda-python-build
- conda-python-tests
- wheel-build-nx-cugraph
- wheel-tests-nx-cugraph
secrets: inherit
uses: rapidsai/shared-workflows/.github/workflows/[email protected]
if: always()
@@ -46,14 +48,30 @@ jobs:
uses: rapidsai/shared-workflows/.github/workflows/[email protected]
with:
build_type: pull-request
# This selects "ARCH=amd64 + the latest supported Python + CUDA".
matrix_filter: map(select(.ARCH == "amd64")) | group_by(.CUDA_VER|split(".")|map(tonumber)|.[0]) | map(max_by([(.PY_VER|split(".")|map(tonumber)), (.CUDA_VER|split(".")|map(tonumber))]))
conda-python-tests:
needs: [conda-python-build, changed-files]
secrets: inherit
uses: rapidsai/shared-workflows/.github/workflows/[email protected]
if: fromJSON(needs.changed-files.outputs.changed_file_groups).test_python
with:
build_type: pull-request
run_codecov: false
wheel-build-nx-cugraph:
needs: [checks]
secrets: inherit
uses: rapidsai/shared-workflows/.github/workflows/[email protected]
with:
build_type: pull-request
script: ci/build_wheel_nx-cugraph.sh
# This selects "ARCH=amd64 + the latest supported Python + CUDA".
# This selects "ARCH=amd64 + the latest supported Python, 1 job per major CUDA version".
matrix_filter: map(select(.ARCH == "amd64")) | group_by(.CUDA_VER|split(".")|map(tonumber)|.[0]) | map(max_by([(.PY_VER|split(".")|map(tonumber)), (.CUDA_VER|split(".")|map(tonumber))]))
wheel-tests-nx-cugraph:
needs: [wheel-build-nx-cugraph, changed-files]
secrets: inherit
uses: rapidsai/shared-workflows/.github/workflows/[email protected]
if: fromJSON(needs.changed-files.outputs.changed_file_groups).test_python
with:
build_type: pull-request
script: ci/test_wheel_nx-cugraph.sh
# This selects "ARCH=amd64 + the latest supported Python, 1 job per major CUDA version".
matrix_filter: map(select(.ARCH == "amd64")) | group_by(.CUDA_VER|split(".")|map(tonumber)|.[0]) | map(max_by([(.PY_VER|split(".")|map(tonumber)), (.CUDA_VER|split(".")|map(tonumber))]))
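
Note on the matrix_filter lines above: the expression is a jq filter that keeps only amd64 entries, groups them by major CUDA version, and picks the entry with the newest Python (then newest CUDA) from each group. The standalone Python sketch below illustrates the same selection on a made-up sample matrix; the entries are hypothetical and not taken from shared-workflows.

from itertools import groupby

# Hypothetical sample matrix; real entries come from rapidsai/shared-workflows.
matrix = [
    {"ARCH": "amd64", "PY_VER": "3.10", "CUDA_VER": "11.8.0"},
    {"ARCH": "amd64", "PY_VER": "3.10", "CUDA_VER": "12.0.1"},
    {"ARCH": "amd64", "PY_VER": "3.12", "CUDA_VER": "12.5.1"},
    {"ARCH": "arm64", "PY_VER": "3.12", "CUDA_VER": "12.5.1"},
]

def vtuple(version):
    # "12.5.1" -> (12, 5, 1), mirroring jq's split(".") | map(tonumber)
    return tuple(int(p) for p in version.split("."))

amd64 = [e for e in matrix if e["ARCH"] == "amd64"]  # map(select(.ARCH == "amd64"))
amd64.sort(key=lambda e: vtuple(e["CUDA_VER"])[0])   # groupby needs sorted input
selected = [
    # max_by([PY_VER, CUDA_VER]) within each major-CUDA group
    max(group, key=lambda e: (vtuple(e["PY_VER"]), vtuple(e["CUDA_VER"])))
    for _, group in groupby(amd64, key=lambda e: vtuple(e["CUDA_VER"])[0])
]
print(selected)  # one amd64 job per major CUDA version, each on the newest Python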
5 changes: 5 additions & 0 deletions .github/workflows/test.yaml
@@ -22,6 +22,9 @@ jobs:
branch: ${{ inputs.branch }}
date: ${{ inputs.date }}
sha: ${{ inputs.sha }}
# This selects "ARCH=amd64 + the latest supported Python, 1 job per major CUDA version".
matrix_filter: map(select(.ARCH == "amd64")) | group_by(.CUDA_VER|split(".")|map(tonumber)|.[0]) | map(max_by([(.PY_VER|split(".")|map(tonumber)), (.CUDA_VER|split(".")|map(tonumber))]))
run_codecov: false
wheel-tests-nx-cugraph:
secrets: inherit
uses: rapidsai/shared-workflows/.github/workflows/[email protected]
@@ -31,3 +34,5 @@ jobs:
date: ${{ inputs.date }}
sha: ${{ inputs.sha }}
script: ci/test_wheel_nx-cugraph.sh
# This selects "ARCH=amd64 + the latest supported Python, 1 job per major CUDA version".
matrix_filter: map(select(.ARCH == "amd64")) | group_by(.CUDA_VER|split(".")|map(tonumber)|.[0]) | map(max_by([(.PY_VER|split(".")|map(tonumber)), (.CUDA_VER|split(".")|map(tonumber))]))
81 changes: 48 additions & 33 deletions .pre-commit-config.yaml
@@ -1,8 +1,8 @@
## https://pre-commit.com/
#
# Before first use: `pre-commit install`
# To run: `make lint`
# To update: `make lint-update`
# To run: `pre-commit run --all-files`
# To update: `pre-commit autoupdate`
# - &flake8_dependencies below needs updated manually
exclude: '^thirdparty'
fail_fast: false
@@ -20,28 +20,12 @@ repos:
- id: check-toml
- id: check-yaml
exclude: '^conda/recipes/.*\.yaml$'
- id: check-executables-have-shebangs
- id: debug-statements
- id: end-of-file-fixer
exclude_types: [svg]
- id: mixed-line-ending
- id: no-commit-to-branch
args: [-p, "^branch-2....$"]
- id: trailing-whitespace
- repo: https://github.com/PyCQA/flake8
rev: 7.1.1
hooks:
- id: flake8
args: ["--config=.flake8"] # uses config
additional_dependencies: &flake8_dependencies
# These versions need updated manually
- flake8==7.1.1
- flake8-bugbear==24.8.19
- flake8-simplify==0.21.0
- repo: https://github.com/asottile/yesqa
rev: v1.5.0
hooks:
- id: yesqa
additional_dependencies: *flake8_dependencies
- repo: https://github.com/abravalheri/validate-pyproject
rev: v0.22
hooks:
@@ -65,6 +49,26 @@ repos:
rev: 24.10.0
hooks:
- id: black
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.7.3
hooks:
- id: ruff
args: [--fix-only, --show-fixes] # --unsafe-fixes]
- repo: https://github.com/PyCQA/flake8
rev: 7.1.1
hooks:
- id: flake8
args: ["--config=.flake8"] # uses config
additional_dependencies: &flake8_dependencies
# These versions need updated manually
- flake8==7.1.1
- flake8-bugbear==24.10.31
- flake8-simplify==0.21.0
- repo: https://github.com/asottile/yesqa
rev: v1.5.0
hooks:
- id: yesqa
additional_dependencies: *flake8_dependencies
- repo: https://github.com/codespell-project/codespell
rev: v2.3.0
hooks:
@@ -73,18 +77,12 @@ repos:
additional_dependencies: [tomli]
files: ^(nx_cugraph|docs)/
args: ["-L thirdparty,coo,COO,numer"]
- repo: https://github.com/rapidsai/pre-commit-hooks
rev: v0.4.0
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.7.3
hooks:
- id: verify-copyright
files: |
(?x)
[.](sh|py)$|
[.]flake8[.]cython$|
meta[.]yaml$|
setup[.]cfg$
- id: verify-alpha-spec
args: ["--fix"]
- id: ruff
# Don't have strict linting for miscellaneous code
args: [--extend-exclude, "benchmarks/,ci/,docs/,notebooks/"]
- repo: https://github.com/rapidsai/dependency-file-generator
rev: v1.16.0
hooks:
@@ -95,7 +93,7 @@ repos:
- id: nx-cugraph-meta-data-update
name: nx-cugraph meta-data updater
entry: bash -c "PYTHONPATH=. python _nx_cugraph/__init__.py"
files: ^nx_cugraph
files: ^(nx_cugraph|_nx_cugraph)/
types: [python]
language: python
pass_filenames: false
@@ -105,8 +103,25 @@ repos:
- id: nx-cugraph-readme-update
name: nx-cugraph README updater
entry: bash -c "PYTHONPATH=. python ./scripts/update_readme.py ./README.md"
files: ^nx_cugraph/
files: ^(nx_cugraph|_nx_cugraph)/
types_or: [python, markdown]
language: python
pass_filenames: false
additional_dependencies: ["networkx>=3.3"]
additional_dependencies: ["networkx>=3.4"]
- repo: https://github.com/rapidsai/pre-commit-hooks
rev: v0.4.0
hooks:
- id: verify-copyright
files: |
(?x)
[.](sh|py)$|
[.]flake8[.]cython$|
meta[.]yaml$|
setup[.]cfg$
- id: verify-alpha-spec
args: ["--fix"]
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v5.0.0
hooks:
- id: no-commit-to-branch
args: [-p, "^branch-2....$"]
16 changes: 11 additions & 5 deletions _nx_cugraph/__init__.py
@@ -343,14 +343,16 @@ def update_env_var(varname):
return d


def _check_networkx_version() -> tuple[int, int]:
def _check_networkx_version() -> tuple[int, int] | tuple[int, int, int]:
"""Check the version of networkx and return ``(major, minor)`` version tuple."""
import re
import warnings

import networkx as nx

version_major, version_minor = nx.__version__.split(".")[:2]
version_major, version_minor, *version_bug = nx.__version__.split(".")[:3]
if has_bug := bool(version_bug):
version_bug = version_bug[0]
if version_major != "3":
warnings.warn(
f"nx-cugraph version {__version__} is only known to work with networkx "
@@ -363,15 +365,19 @@ def _check_networkx_version() -> tuple[int, int]:
# Allow single-digit minor versions, e.g. 3.4 and release candidates, e.g. 3.4rc0
pattern = r"^\d(rc\d+)?$"

if not re.match(pattern, version_minor):
if not re.match(pattern, version_bug if has_bug else version_minor):
raise RuntimeWarning(
f"nx-cugraph version {__version__} does not work with networkx version "
f"{nx.__version__}. Please upgrade (or fix) your Python environment."
)

nxver_major = int(version_major)
nxver_minor = int(re.match(r"^\d+", version_minor).group())
return (nxver_major, nxver_minor)
if not has_bug:
nxver_minor = int(re.match(r"^\d+", version_minor).group())
return (nxver_major, nxver_minor)
nxver_minor = int(version_minor)
nxver_bug = int(re.match(r"^\d+", version_bug).group())
return (nxver_major, nxver_minor, nxver_bug)


if __name__ == "__main__":
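
For reference, here is a minimal standalone sketch (not the library code itself) of how the updated parsing in _check_networkx_version treats two- and three-part version strings; the sample inputs below are illustrative only.

import re

def parse_nx_version(version):
    # Mirrors the split/regex handling above, for illustration purposes only.
    major, minor, *bug = version.split(".")[:3]
    if not bug:
        # Two-part versions, including release candidates such as "3.4rc0"
        return int(major), int(re.match(r"^\d+", minor).group())
    # Three-part versions, e.g. "3.4.2" or "3.5.0rc0"
    return int(major), int(minor), int(re.match(r"^\d+", bug[0]).group())

print(parse_nx_version("3.4"))     # (3, 4)
print(parse_nx_version("3.4rc0"))  # (3, 4)
print(parse_nx_version("3.4.2"))   # (3, 4, 2)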
2 changes: 1 addition & 1 deletion benchmarks/nx-cugraph/pytest-based/bench_algos.py
@@ -79,7 +79,7 @@ def setup_module(module):
# easier-to-read name. This is especially helpful for Dataset objs (see
# https://docs.pytest.org/en/stable/reference/reference.html#pytest-fixture)
@pytest.fixture(
scope="module", params=dataset_param_values, ids=lambda ds: f"ds={str(ds)}"
scope="module", params=dataset_param_values, ids=lambda ds: f"ds={ds!s}"
)
def graph_obj(request):
"""
@@ -131,8 +131,7 @@ def get_first_gpu_info():
first_gpu = gpus[0] # Get the information for the first GPU
gpu_name, mem_total, _, _ = first_gpu.split(",")
return f"{num_gpus} x {gpu_name.strip()} ({round(int(mem_total.strip().split()[0]) / (1024), 2)} GB)"
else:
print("No GPU found or unable to query GPU details.")
print("No GPU found or unable to query GPU details.")
except subprocess.CalledProcessError:
print("Failed to execute nvidia-smi. No GPU information available.")

6 changes: 5 additions & 1 deletion ci/build_python.sh
@@ -9,7 +9,11 @@ source rapids-date-string

rapids-print-env

rapids-generate-version > ./VERSION
# TODO: revert this once we start publishing nightly packages from the
# 'nx-cugraph' repo and stop publishing them from the 'cugraph' repo
# rapids-generate-version > ./VERSION
echo "24.12.00a1000" > ./VERSION

rapids-logger "Begin py build"

# TODO: Remove `--no-test` flags once importing on a CPU
11 changes: 11 additions & 0 deletions ci/run_nx_cugraph_pytests.sh
@@ -0,0 +1,11 @@
#!/bin/bash
# Copyright (c) 2024, NVIDIA CORPORATION.

set -euo pipefail

# Support invoking run_nx_cugraph_pytests.sh outside the script directory
cd "$(dirname "$(realpath "${BASH_SOURCE[0]}")")"/..

# Only be verbose and display print information for the first pytest command here
NX_CUGRAPH_USE_COMPAT_GRAPHS=False pytest --capture=no --verbose --cache-clear --benchmark-disable "$@" ./nx_cugraph/tests
NX_CUGRAPH_USE_COMPAT_GRAPHS=True pytest --cache-clear --benchmark-disable "$@" ./nx_cugraph/tests
101 changes: 101 additions & 0 deletions ci/test_python.sh
@@ -0,0 +1,101 @@
#!/bin/bash
# Copyright (c) 2022-2024, NVIDIA CORPORATION.

set -euo pipefail

# Support invoking test_python.sh outside the script directory
cd "$(dirname "$(realpath "${BASH_SOURCE[0]}")")"/../

. /opt/conda/etc/profile.d/conda.sh

RAPIDS_VERSION="$(rapids-version)"

rapids-logger "Generate Python testing dependencies"
rapids-dependency-file-generator \
--output conda \
--file-key test_python \
--matrix "cuda=${RAPIDS_CUDA_VERSION%.*};arch=$(arch);py=${RAPIDS_PY_VERSION}" | tee env.yaml

rapids-mamba-retry env create --yes -f env.yaml -n test

# Temporarily allow unbound variables for conda activation.
set +u
conda activate test
set -u

rapids-logger "Downloading artifacts from previous jobs"
PYTHON_CHANNEL=$(rapids-download-conda-from-s3 python)

RAPIDS_TESTS_DIR=${RAPIDS_TESTS_DIR:-"${PWD}/test-results"}
RAPIDS_COVERAGE_DIR=${RAPIDS_COVERAGE_DIR:-"${PWD}/coverage-results"}
mkdir -p "${RAPIDS_TESTS_DIR}" "${RAPIDS_COVERAGE_DIR}"

rapids-print-env

# TODO: remove the '>=24.12.00a1000' once we start publishing nightly packages
# from the 'nx-cugraph' repo and stop publishing them from the 'cugraph' repo
rapids-mamba-retry install \
--channel "${PYTHON_CHANNEL}" \
"nx-cugraph=${RAPIDS_VERSION}.*,>=24.12.00a1000"

rapids-logger "Check GPU usage"
nvidia-smi

# export LD_PRELOAD="${CONDA_PREFIX}/lib/libgomp.so.1"

# RAPIDS_DATASET_ROOT_DIR is used by test scripts
# export RAPIDS_DATASET_ROOT_DIR="$(realpath datasets)"
# pushd "${RAPIDS_DATASET_ROOT_DIR}"
# ./get_test_data.sh --benchmark
# popd

EXITCODE=0
trap "EXITCODE=1" ERR
set +e

rapids-logger "pytest nx-cugraph"
./ci/run_nx_cugraph_pytests.sh \
--junitxml="${RAPIDS_TESTS_DIR}/junit-nx-cugraph.xml" \
--cov=nx_cugraph \
--cov-report=xml:"${RAPIDS_COVERAGE_DIR}/nx-cugraph-coverage.xml" \
--cov-report=term

rapids-logger "pytest networkx using nx-cugraph backend"

pushd nx_cugraph
../run_nx_tests.sh

# run_nx_tests.sh outputs coverage data, so check that total coverage is >0.0%
# in case nx-cugraph failed to load but fallback mode allowed the run to pass.
_coverage=$(coverage report | grep "^TOTAL")

echo "nx-cugraph coverage from networkx tests: $_coverage"
echo $_coverage | awk '{ if ($NF == "0.0%") exit 1 }'

# Ensure all algorithms were called by comparing covered lines to function lines.
# Run our tests again (they're fast enough) to add their coverage, then create coverage.json
NX_CUGRAPH_USE_COMPAT_GRAPHS=False pytest \
--pyargs nx_cugraph \
--config-file=../pyproject.toml \
--cov-config=../pyproject.toml \
--cov=nx_cugraph \
--cov-append \
--cov-report=

coverage report \
--include="*/nx_cugraph/algorithms/*" \
--omit=__init__.py \
--show-missing \
--rcfile=../pyproject.toml

coverage json --rcfile=../pyproject.toml

python -m nx_cugraph.tests.ensure_algos_covered

# Exercise (and show results of) scripts that show implemented networkx algorithms
python -m nx_cugraph.scripts.print_tree --dispatch-name --plc --incomplete --different
python -m nx_cugraph.scripts.print_table
popd

rapids-logger "Test script exiting with value: $EXITCODE"
exit ${EXITCODE}
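
The ensure_algos_covered step above consumes the coverage.json report generated just before it. As a rough, hypothetical sketch (not the actual nx_cugraph.tests.ensure_algos_covered module), a check in that spirit could read coverage.py's JSON report and flag algorithm modules that were never executed.

import json

# Hypothetical sketch assuming coverage.py's standard JSON report layout
# ("files" -> per-file "summary"); not the real ensure_algos_covered module.
with open("coverage.json") as f:
    report = json.load(f)

never_run = [
    path
    for path, data in report["files"].items()
    if "nx_cugraph/algorithms/" in path
    and not path.endswith("__init__.py")
    and data["summary"]["percent_covered"] == 0
]

if never_run:
    raise SystemExit(f"Algorithm modules never exercised: {never_run}")
print("Every algorithm module was exercised at least once.")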