From a4a6b83bf1945061fab927437df7538bf1e8eb4f Mon Sep 17 00:00:00 2001
From: James Lamb
Date: Thu, 10 Oct 2024 13:55:26 -0500
Subject: [PATCH 1/3] combine pip install calls in wheel-testing scripts
 (#4701)

## Summary

Follow-up to #4690.

Proposes consolidating stuff like this in CI scripts:

```shell
pip install A
pip install B
pip install C
```

Into this:

```shell
pip install A B C
```

## Benefits of these changes

Reduces the risk of creating a broken environment with incompatible packages. Unlike `conda`, `pip` does not evaluate the requirements of all installed packages when you run `pip install`. Installing `torch` and `cugraph-dgl` at the same time, for example, gives us a chance to find out about packaging issues like *"`cugraph-dgl` and `torch` have conflicting requirements on `{other_package}`"* at CI time.

Similar change from `cudf`: https://github.com/rapidsai/cudf/pull/16575

Authors:
  - James Lamb (https://github.com/jameslamb)

Approvers:
  - Kyle Edwards (https://github.com/KyleFromNVIDIA)
  - Alex Barghi (https://github.com/alexbarghi-nv)

URL: https://github.com/rapidsai/cugraph/pull/4701
---
 ci/test_wheel.sh                     |  1 -
 ci/test_wheel_cugraph-dgl.sh         | 25 +++++++--------
 ci/test_wheel_cugraph-equivariant.sh | 19 ++++++-----
 ci/test_wheel_cugraph-pyg.sh         | 47 ++++++++++++++--------------
 ci/test_wheel_cugraph.sh             |  4 +--
 ci/test_wheel_nx-cugraph.sh          |  2 +-
 ci/test_wheel_pylibcugraph.sh        |  4 +--
 7 files changed, 49 insertions(+), 53 deletions(-)

diff --git a/ci/test_wheel.sh b/ci/test_wheel.sh
index e3690dfde6e..dfba25bbe1a 100755
--- a/ci/test_wheel.sh
+++ b/ci/test_wheel.sh
@@ -4,7 +4,6 @@
 set -eoxu pipefail

 package_name=$1
-package_dir=$2

 python_package_name=$(echo ${package_name}|sed 's/-/_/g')

diff --git a/ci/test_wheel_cugraph-dgl.sh b/ci/test_wheel_cugraph-dgl.sh
index 688c58026bd..d7558d43b6d 100755
--- a/ci/test_wheel_cugraph-dgl.sh
+++ b/ci/test_wheel_cugraph-dgl.sh
@@ -4,24 +4,16 @@
 set -eoxu pipefail

 package_name="cugraph-dgl"
-package_dir="python/cugraph-dgl"
-
-python_package_name=$(echo ${package_name}|sed 's/-/_/g')

 mkdir -p ./dist
 RAPIDS_PY_CUDA_SUFFIX="$(rapids-wheel-ctk-name-gen ${RAPIDS_CUDA_VERSION})"

-# Download wheels built during this job.
+# Download the pylibcugraph, cugraph, and cugraph-dgl built in the previous step
 RAPIDS_PY_WHEEL_NAME="pylibcugraph_${RAPIDS_PY_CUDA_SUFFIX}" rapids-download-wheels-from-s3 ./local-deps
 RAPIDS_PY_WHEEL_NAME="cugraph_${RAPIDS_PY_CUDA_SUFFIX}" rapids-download-wheels-from-s3 ./local-deps
-python -m pip install ./local-deps/*.whl
-
-# use 'ls' to expand wildcard before adding `[extra]` requires for pip
 RAPIDS_PY_WHEEL_NAME="${package_name}_${RAPIDS_PY_CUDA_SUFFIX}" RAPIDS_PY_WHEEL_PURE="1" rapids-download-wheels-from-s3 ./dist
-# pip creates wheels using python package names
-python -m pip install $(ls ./dist/${python_package_name}*.whl)[test]
-
+# determine pytorch and DGL sources
 PKG_CUDA_VER="$(echo ${CUDA_VERSION} | cut -d '.' -f1,2 | tr -d '.')"
 PKG_CUDA_VER_MAJOR=${PKG_CUDA_VER:0:2}
 if [[ "${PKG_CUDA_VER_MAJOR}" == "12" ]]; then
@@ -32,8 +24,15 @@ fi
 PYTORCH_URL="https://download.pytorch.org/whl/cu${PYTORCH_CUDA_VER}"
 DGL_URL="https://data.dgl.ai/wheels/torch-2.3/cu${PYTORCH_CUDA_VER}/repo.html"

-rapids-logger "Installing PyTorch and DGL"
-rapids-retry python -m pip install torch==2.3.0 --index-url ${PYTORCH_URL}
-rapids-retry python -m pip install dgl==2.4.0 --find-links ${DGL_URL}
+# echo to expand wildcard before adding `[extra]` requires for pip
+python -m pip install \
+    -v \
+    --extra-index-url "${PYTORCH_URL}" \
+    --find-links "${DGL_URL}" \
+    "$(echo ./local-deps/pylibcugraph_${RAPIDS_PY_CUDA_SUFFIX}*.whl)" \
+    "$(echo ./local-deps/cugraph_${RAPIDS_PY_CUDA_SUFFIX}*.whl)" \
+    "$(echo ./dist/cugraph_dgl_${RAPIDS_PY_CUDA_SUFFIX}*.whl)[test]" \
+    'dgl==2.4.0' \
+    'torch>=2.3.0,<2.4'

 python -m pytest python/cugraph-dgl/tests
diff --git a/ci/test_wheel_cugraph-equivariant.sh b/ci/test_wheel_cugraph-equivariant.sh
index cb952055f06..3be1d578964 100755
--- a/ci/test_wheel_cugraph-equivariant.sh
+++ b/ci/test_wheel_cugraph-equivariant.sh
@@ -4,19 +4,14 @@
 set -eoxu pipefail

 package_name="cugraph-equivariant"
-package_dir="python/cugraph-equivariant"
-
-python_package_name=$(echo ${package_name}|sed 's/-/_/g')

 mkdir -p ./dist
 RAPIDS_PY_CUDA_SUFFIX="$(rapids-wheel-ctk-name-gen ${RAPIDS_CUDA_VERSION})"

-# use 'ls' to expand wildcard before adding `[extra]` requires for pip
+# Download the cugraph-equivariant built in the previous step
 RAPIDS_PY_WHEEL_NAME="${package_name}_${RAPIDS_PY_CUDA_SUFFIX}" RAPIDS_PY_WHEEL_PURE="1" rapids-download-wheels-from-s3 ./dist
-# pip creates wheels using python package names
-python -m pip install $(ls ./dist/${python_package_name}*.whl)[test]
-
+# determine pytorch source
 PKG_CUDA_VER="$(echo ${CUDA_VERSION} | cut -d '.' -f1,2 | tr -d '.')"
 PKG_CUDA_VER_MAJOR=${PKG_CUDA_VER:0:2}
 if [[ "${PKG_CUDA_VER_MAJOR}" == "12" ]]; then
@@ -26,8 +21,12 @@ else
 fi
 PYTORCH_URL="https://download.pytorch.org/whl/cu${PYTORCH_CUDA_VER}"

-rapids-logger "Installing PyTorch and e3nn"
-rapids-retry python -m pip install torch --index-url ${PYTORCH_URL}
-rapids-retry python -m pip install e3nn
+# echo to expand wildcard before adding `[extra]` requires for pip
+python -m pip install \
+    -v \
+    --extra-index-url "${PYTORCH_URL}" \
+    "$(echo ./dist/cugraph_equivariant_${RAPIDS_PY_CUDA_SUFFIX}*.whl)[test]" \
+    'e3nn' \
+    'torch>=2.3.0,<2.4'

 python -m pytest python/cugraph-equivariant/cugraph_equivariant/tests
diff --git a/ci/test_wheel_cugraph-pyg.sh b/ci/test_wheel_cugraph-pyg.sh
index 8f4b16a2dec..2f508ee830b 100755
--- a/ci/test_wheel_cugraph-pyg.sh
+++ b/ci/test_wheel_cugraph-pyg.sh
@@ -4,29 +4,16 @@
 set -eoxu pipefail

 package_name="cugraph-pyg"
-package_dir="python/cugraph-pyg"
-
-python_package_name=$(echo ${package_name}|sed 's/-/_/g')

 mkdir -p ./dist
 RAPIDS_PY_CUDA_SUFFIX="$(rapids-wheel-ctk-name-gen ${RAPIDS_CUDA_VERSION})"

-# Download wheels built during this job.
+# Download the pylibcugraph, cugraph, and cugraph-pyg built in the previous step
 RAPIDS_PY_WHEEL_NAME="pylibcugraph_${RAPIDS_PY_CUDA_SUFFIX}" rapids-download-wheels-from-s3 ./local-deps
 RAPIDS_PY_WHEEL_NAME="cugraph_${RAPIDS_PY_CUDA_SUFFIX}" rapids-download-wheels-from-s3 ./local-deps
-python -m pip install ./local-deps/*.whl
-
-# use 'ls' to expand wildcard before adding `[extra]` requires for pip
 RAPIDS_PY_WHEEL_NAME="${package_name}_${RAPIDS_PY_CUDA_SUFFIX}" RAPIDS_PY_WHEEL_PURE="1" rapids-download-wheels-from-s3 ./dist
-# pip creates wheels using python package names
-python -m pip install $(ls ./dist/${python_package_name}*.whl)[test]
-
-# RAPIDS_DATASET_ROOT_DIR is used by test scripts
-export RAPIDS_DATASET_ROOT_DIR="$(realpath datasets)"
-
-# Used to skip certain examples in CI due to memory limitations
-export CI_RUN=1
+# determine pytorch and pyg sources
 if [[ "${CUDA_VERSION}" == "11.8.0" ]]; then
   PYTORCH_URL="https://download.pytorch.org/whl/cu118"
   PYG_URL="https://data.pyg.org/whl/torch-2.3.0+cu118.html"
@@ -34,15 +21,27 @@ else
   PYTORCH_URL="https://download.pytorch.org/whl/cu121"
   PYG_URL="https://data.pyg.org/whl/torch-2.3.0+cu121.html"
 fi
-rapids-logger "Installing PyTorch and PyG dependencies"
-rapids-retry python -m pip install torch==2.3.0 --index-url ${PYTORCH_URL}
-rapids-retry python -m pip install "torch-geometric>=2.5,<2.6"
-rapids-retry python -m pip install \
-    ogb \
-    pyg_lib \
-    torch_scatter \
-    torch_sparse \
-    -f ${PYG_URL}
+
+# echo to expand wildcard before adding `[extra]` requires for pip
+python -m pip install \
+    -v \
+    --extra-index-url "${PYTORCH_URL}" \
+    --find-links "${PYG_URL}" \
+    "$(echo ./local-deps/pylibcugraph_${RAPIDS_PY_CUDA_SUFFIX}*.whl)" \
+    "$(echo ./local-deps/cugraph_${RAPIDS_PY_CUDA_SUFFIX}*.whl)" \
+    "$(echo ./dist/cugraph_pyg_${RAPIDS_PY_CUDA_SUFFIX}*.whl)[test]" \
+    'ogb' \
+    'pyg_lib' \
+    'torch>=2.3.0,<2.4' \
+    'torch-geometric>=2.5,<2.6' \
+    'torch_scatter' \
+    'torch_sparse'
+
+# RAPIDS_DATASET_ROOT_DIR is used by test scripts
+export RAPIDS_DATASET_ROOT_DIR="$(realpath datasets)"
+
+# Used to skip certain examples in CI due to memory limitations
+export CI_RUN=1

 rapids-logger "pytest cugraph-pyg (single GPU)"
 pushd python/cugraph-pyg/cugraph_pyg
diff --git a/ci/test_wheel_cugraph.sh b/ci/test_wheel_cugraph.sh
index d351ea21624..295cec7cb10 100755
--- a/ci/test_wheel_cugraph.sh
+++ b/ci/test_wheel_cugraph.sh
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright (c) 2023, NVIDIA CORPORATION.
+# Copyright (c) 2023-2024, NVIDIA CORPORATION.
 set -eoxu pipefail
@@ -8,4 +8,4 @@
 RAPIDS_PY_CUDA_SUFFIX="$(rapids-wheel-ctk-name-gen ${RAPIDS_CUDA_VERSION})"
 RAPIDS_PY_WHEEL_NAME="pylibcugraph_${RAPIDS_PY_CUDA_SUFFIX}" rapids-download-wheels-from-s3 ./local-pylibcugraph-dep
 python -m pip install --no-deps ./local-pylibcugraph-dep/pylibcugraph*.whl
-./ci/test_wheel.sh cugraph python/cugraph
+./ci/test_wheel.sh cugraph
diff --git a/ci/test_wheel_nx-cugraph.sh b/ci/test_wheel_nx-cugraph.sh
index b5adfbcb9d3..024169ae698 100755
--- a/ci/test_wheel_nx-cugraph.sh
+++ b/ci/test_wheel_nx-cugraph.sh
@@ -8,4 +8,4 @@
 RAPIDS_PY_CUDA_SUFFIX="$(rapids-wheel-ctk-name-gen ${RAPIDS_CUDA_VERSION})"
 RAPIDS_PY_WHEEL_NAME="pylibcugraph_${RAPIDS_PY_CUDA_SUFFIX}" rapids-download-wheels-from-s3 ./local-deps
 python -m pip install ./local-deps/*.whl
-./ci/test_wheel.sh nx-cugraph python/nx-cugraph
+./ci/test_wheel.sh nx-cugraph
diff --git a/ci/test_wheel_pylibcugraph.sh b/ci/test_wheel_pylibcugraph.sh
index d04cb358d21..ddc9976308b 100755
--- a/ci/test_wheel_pylibcugraph.sh
+++ b/ci/test_wheel_pylibcugraph.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
-# Copyright (c) 2023, NVIDIA CORPORATION.
+# Copyright (c) 2023-2024, NVIDIA CORPORATION.

 set -eoxu pipefail

-./ci/test_wheel.sh pylibcugraph python/pylibcugraph
+./ci/test_wheel.sh pylibcugraph

From 4bb54942898d3fcd55ed8370c4013618bb39841b Mon Sep 17 00:00:00 2001
From: Yunsong Wang
Date: Mon, 14 Oct 2024 09:46:25 -0700
Subject: [PATCH 2/3] Clean up the use of cuco hash functions (#4707)

This PR improves the use of cuco hashers by replacing detail APIs with public ones and updating device code to use `cuda::std::byte` instead of `std::byte`.

Authors:
  - Yunsong Wang (https://github.com/PointKernel)

Approvers:
  - Chuck Hastings (https://github.com/ChuckHastings)
  - Seunghwa Kang (https://github.com/seunghwak)

URL: https://github.com/rapidsai/cugraph/pull/4707
---
 cpp/src/detail/graph_partition_utils.cuh           | 14 +++++++-------
 cpp/src/structure/remove_multi_edges_impl.cuh      |  5 +++--
 cpp/tests/prims/mg_count_if_v.cu                   |  2 +-
 ...mg_per_v_pair_transform_dst_nbr_intersection.cu |  2 +-
 ...pair_transform_dst_nbr_weighted_intersection.cu |  2 +-
 cpp/tests/prims/mg_transform_reduce_v.cu           |  2 +-
 cpp/tests/utilities/property_generator_kernels.cuh |  4 ++--
 7 files changed, 16 insertions(+), 15 deletions(-)

diff --git a/cpp/src/detail/graph_partition_utils.cuh b/cpp/src/detail/graph_partition_utils.cuh
index 00931780266..b10d2e788f4 100644
--- a/cpp/src/detail/graph_partition_utils.cuh
+++ b/cpp/src/detail/graph_partition_utils.cuh
@@ -43,7 +43,7 @@ struct compute_gpu_id_from_ext_vertex_t {

   __host__ __device__ int operator()(vertex_t v) const
   {
-    cuco::detail::MurmurHash3_32 hash_func{};
+    cuco::murmurhash3_32 hash_func{};
     auto vertex_partition_id = static_cast(hash_func(v) % comm_size);
     return partition_manager::compute_global_comm_rank_from_vertex_partition_id(
       major_comm_size, minor_comm_size, vertex_partition_id);
@@ -58,7 +58,7 @@ struct compute_gpu_id_from_ext_edge_id_t {

   __host__ __device__ int operator()(edge_t e) const
   {
-    cuco::detail::MurmurHash3_32 hash_func{};
+    cuco::murmurhash3_32 hash_func{};
     auto vertex_partition_id = static_cast(hash_func(e) % comm_size);
     return partition_manager::compute_global_comm_rank_from_vertex_partition_id(
       major_comm_size, minor_comm_size, vertex_partition_id);
@@ -88,7 +88,7 @@ struct compute_vertex_partition_id_from_ext_vertex_t {

   __host__ __device__ int operator()(vertex_t v) const
   {
-    cuco::detail::MurmurHash3_32 hash_func{};
+    cuco::murmurhash3_32 hash_func{};
     return hash_func(v) % comm_size;
   }
 };
@@ -114,7 +114,7 @@ struct compute_gpu_id_from_ext_edge_endpoints_t {

   __host__ __device__ int operator()(vertex_t major, vertex_t minor) const
   {
-    cuco::detail::MurmurHash3_32 hash_func{};
+    cuco::murmurhash3_32 hash_func{};
     auto major_vertex_partition_id = static_cast(hash_func(major) % comm_size);
     auto minor_vertex_partition_id = static_cast(hash_func(minor) % comm_size);
     auto major_comm_rank = major_vertex_partition_id % major_comm_size;
@@ -126,7 +126,7 @@ struct compute_gpu_id_from_ext_edge_endpoints_t {
   __host__ __device__ int operator()(
     thrust::tuple pair /* major, minor */) const
   {
-    cuco::detail::MurmurHash3_32 hash_func{};
+    cuco::murmurhash3_32 hash_func{};
     auto major_vertex_partition_id = static_cast(hash_func(thrust::get<0>(pair)) % comm_size);
     auto minor_vertex_partition_id = static_cast(hash_func(thrust::get<1>(pair)) % comm_size);
     auto major_comm_rank = major_vertex_partition_id % major_comm_size;
@@ -192,7 +192,7 @@ struct compute_edge_partition_id_from_ext_edge_endpoints_t {

   __host__ __device__ int operator()(vertex_t major, vertex_t minor) const
   {
-    cuco::detail::MurmurHash3_32 hash_func{};
+    cuco::murmurhash3_32 hash_func{};
     return (hash_func(major) % comm_size) * minor_comm_size +
            (hash_func(minor) % comm_size) / major_comm_size;
   }
@@ -200,7 +200,7 @@ struct compute_edge_partition_id_from_ext_edge_endpoints_t {
   __host__ __device__ int operator()(
     thrust::tuple pair /* major, minor */) const
   {
-    cuco::detail::MurmurHash3_32 hash_func{};
+    cuco::murmurhash3_32 hash_func{};
     return (hash_func(thrust::get<0>(pair)) % comm_size) * minor_comm_size +
            (hash_func(thrust::get<1>(pair)) % comm_size) / major_comm_size;
   }
diff --git a/cpp/src/structure/remove_multi_edges_impl.cuh b/cpp/src/structure/remove_multi_edges_impl.cuh
index ce83fdcb66a..7e266ab2caf 100644
--- a/cpp/src/structure/remove_multi_edges_impl.cuh
+++ b/cpp/src/structure/remove_multi_edges_impl.cuh
@@ -28,6 +28,7 @@

 #include
+#include
 #include
 #include
 #include
@@ -53,8 +54,8 @@ struct hash_src_dst_pair {
     vertex_t pair[2];
     pair[0] = thrust::get<0>(t);
     pair[1] = thrust::get<1>(t);
-    cuco::detail::MurmurHash3_32 hash_func{};
-    return hash_func.compute_hash(reinterpret_cast(pair), 2 * sizeof(vertex_t)) %
+    cuco::murmurhash3_32 hash_func{};
+    return hash_func.compute_hash(reinterpret_cast(pair), 2 * sizeof(vertex_t)) %
            num_groups;
   }
 };
diff --git a/cpp/tests/prims/mg_count_if_v.cu b/cpp/tests/prims/mg_count_if_v.cu
index 56550027936..19ec285109c 100644
--- a/cpp/tests/prims/mg_count_if_v.cu
+++ b/cpp/tests/prims/mg_count_if_v.cu
@@ -48,7 +48,7 @@ struct test_predicate {
   test_predicate(int mod_count) : mod(mod_count) {}
   __device__ bool operator()(vertex_t, const vertex_t& val)
   {
-    cuco::detail::MurmurHash3_32 hash_func{};
+    cuco::murmurhash3_32 hash_func{};
     return (0 == (hash_func(val) % mod));
   }
 };
diff --git a/cpp/tests/prims/mg_per_v_pair_transform_dst_nbr_intersection.cu b/cpp/tests/prims/mg_per_v_pair_transform_dst_nbr_intersection.cu
index fc6369ec721..4025d4d1b1d 100644
--- a/cpp/tests/prims/mg_per_v_pair_transform_dst_nbr_intersection.cu
+++ b/cpp/tests/prims/mg_per_v_pair_transform_dst_nbr_intersection.cu
@@ -137,7 +137,7 @@ class Tests_MGPerVPairTransformDstNbrIntersection
       cugraph::get_dataframe_buffer_begin(mg_vertex_pair_buffer),
       cugraph::get_dataframe_buffer_end(mg_vertex_pair_buffer),
       [comm_rank, num_vertices = mg_graph_view.number_of_vertices()] __device__(size_t i) {
-        cuco::detail::MurmurHash3_32
+        cuco::murmurhash3_32
           hash_func{};  // use hash_func to generate arbitrary vertex pairs
         auto v0 = static_cast(hash_func(i + comm_rank) % num_vertices);
         auto v1 = static_cast(hash_func(i + num_vertices + comm_rank) % num_vertices);
diff --git a/cpp/tests/prims/mg_per_v_pair_transform_dst_nbr_weighted_intersection.cu b/cpp/tests/prims/mg_per_v_pair_transform_dst_nbr_weighted_intersection.cu
index 06a23880d81..8af187554e1 100644
--- a/cpp/tests/prims/mg_per_v_pair_transform_dst_nbr_weighted_intersection.cu
+++ b/cpp/tests/prims/mg_per_v_pair_transform_dst_nbr_weighted_intersection.cu
@@ -163,7 +163,7 @@ class Tests_MGPerVPairTransformDstNbrIntersection
       cugraph::get_dataframe_buffer_begin(mg_vertex_pair_buffer),
       cugraph::get_dataframe_buffer_end(mg_vertex_pair_buffer),
       [comm_rank, num_vertices = mg_graph_view.number_of_vertices()] __device__(size_t i) {
-        cuco::detail::MurmurHash3_32
+        cuco::murmurhash3_32
          hash_func{};  // use hash_func to generate arbitrary vertex pairs
         auto v0 = static_cast(hash_func(i + comm_rank) % num_vertices);
         auto v1 = static_cast(hash_func(i + num_vertices + comm_rank) % num_vertices);
diff --git a/cpp/tests/prims/mg_transform_reduce_v.cu b/cpp/tests/prims/mg_transform_reduce_v.cu
index 0e6d71094bd..9e9bee89d67 100644
--- a/cpp/tests/prims/mg_transform_reduce_v.cu
+++ b/cpp/tests/prims/mg_transform_reduce_v.cu
@@ -53,7 +53,7 @@ struct v_op_t {

   __device__ auto operator()(vertex_t, vertex_t val) const
   {
-    cuco::detail::MurmurHash3_32 hash_func{};
+    cuco::murmurhash3_32 hash_func{};
     return cugraph::test::detail::make_property_value(hash_func(val) % mod);
   }
 };
diff --git a/cpp/tests/utilities/property_generator_kernels.cuh b/cpp/tests/utilities/property_generator_kernels.cuh
index d8d5cc420fd..78b22e0dac2 100644
--- a/cpp/tests/utilities/property_generator_kernels.cuh
+++ b/cpp/tests/utilities/property_generator_kernels.cuh
@@ -60,7 +60,7 @@ struct vertex_property_transform {
   {
     static_assert(cugraph::is_thrust_tuple_of_arithmetic::value || std::is_arithmetic_v);
-    cuco::detail::MurmurHash3_32 hash_func{};
+    cuco::murmurhash3_32 hash_func{};
     return make_property_value(hash_func(v) % mod);
   }
 };
@@ -74,7 +74,7 @@ struct edge_property_transform {
   {
     static_assert(cugraph::is_thrust_tuple_of_arithmetic::value || std::is_arithmetic_v);
-    cuco::detail::MurmurHash3_32 hash_func{};
+    cuco::murmurhash3_32 hash_func{};
     return make_property_value(hash_func(src + dst) % mod);
   }
 };

From 21fe9bfdf9ead952b30f114eadf6d39edde3edea Mon Sep 17 00:00:00 2001
From: Erik Welch
Date: Tue, 15 Oct 2024 05:55:05 -0500
Subject: [PATCH 3/3] Update nx-cugraph to NetworkX 3.4 (#4717)

This runs `make` in the `python/nx-cugraph` directory and updates the
pre-commit hook. It also fixes the `ego_graph` tests to check the
`nx.config.fallback_to_nx` configuration.
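
For reviewers unfamiliar with the NetworkX 3.4 behavior being tested, the
sketch below illustrates the gating pattern the updated test relies on. It is
an illustration only, not part of this patch: the helper name is hypothetical,
and it assumes `nx.config.fallback_to_nx` (added in NetworkX 3.4) plus an
nx-cugraph graph `Gcg` whose `ego_graph` call is unsupported for the given
arguments.

```python
# Illustration only (not part of this patch). Shows how a test can branch on
# the NetworkX >= 3.4 fallback configuration; the helper name is hypothetical.
import networkx as nx
import pytest


def check_unsupported_ego_graph(Gcg, n, **kwargs):
    # Simplified version check; the real tests use an _nxver helper.
    nx_version = tuple(int(x) for x in nx.__version__.split(".")[:2])
    if nx_version < (3, 4) or not nx.config.fallback_to_nx:
        # Older NetworkX, or fallback disabled: the backend call raises.
        with pytest.raises(NotImplementedError, match="ego_graph"):
            nx.ego_graph(Gcg, n, **kwargs)
        return None
    # NetworkX 3.4+ with fallback enabled: dispatch falls back to networkx
    # and returns a plain nx.Graph instead of raising.
    return nx.ego_graph(Gcg, n, **kwargs)
```

When the fallback does occur, the result comes back as an `nx.Graph` even
though the input was a CUDA-backed graph, which is the round trip the updated
assertions compare against.
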
Authors:
  - Erik Welch (https://github.com/eriknw)

Approvers:
  - Rick Ratzel (https://github.com/rlratzel)
  - Jake Awe (https://github.com/AyodeAwe)

URL: https://github.com/rapidsai/cugraph/pull/4717
---
 .pre-commit-config.yaml                              | 4 ++--
 python/nx-cugraph/_nx_cugraph/__init__.py            | 4 ++--
 python/nx-cugraph/nx_cugraph/tests/test_ego_graph.py | 3 +--
 3 files changed, 5 insertions(+), 6 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 8ff284210b7..3687562b48e 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -68,7 +68,7 @@ repos:
         types: [python]
         language: python
         pass_filenames: false
-        additional_dependencies: ["networkx>=3.3"]
+        additional_dependencies: ["networkx>=3.4"]
   - repo: local
     hooks:
       - id: nx-cugraph-readme-update
@@ -78,4 +78,4 @@ repos:
         types_or: [python, markdown]
        language: python
        pass_filenames: false
-        additional_dependencies: ["networkx>=3.3"]
+        additional_dependencies: ["networkx>=3.4"]
diff --git a/python/nx-cugraph/_nx_cugraph/__init__.py b/python/nx-cugraph/_nx_cugraph/__init__.py
index fc0bea47180..9feeda568a6 100644
--- a/python/nx-cugraph/_nx_cugraph/__init__.py
+++ b/python/nx-cugraph/_nx_cugraph/__init__.py
@@ -36,7 +36,7 @@
     "backend_name": "cugraph",
     "project": "nx-cugraph",
     "package": "nx_cugraph",
-    "url": f"https://rapids.ai/nx-cugraph",
+    "url": "https://rapids.ai/nx-cugraph",
     "short_summary": "GPU-accelerated backend.",
     # "description": "TODO",
     "functions": {
@@ -180,7 +180,7 @@
         "ego_graph": "Weighted ego_graph with negative cycles is not yet supported. `NotImplementedError` will be raised if there are negative `distance` edge weights.",
         "eigenvector_centrality": "`nstart` parameter is not used, but it is checked for validity.",
         "from_pandas_edgelist": "cudf.DataFrame inputs also supported; value columns with str is unsuppported.",
-        "generic_bfs_edges": "`neighbors` and `sort_neighbors` parameters are not yet supported.",
+        "generic_bfs_edges": "`neighbors` parameter is not yet supported.",
         "katz_centrality": "`nstart` isn't used (but is checked), and `normalized=False` is not supported.",
         "louvain_communities": "`seed` parameter is currently ignored, and self-loops are not yet supported.",
         "pagerank": "`dangling` parameter is not supported, but it is checked for validity.",
diff --git a/python/nx-cugraph/nx_cugraph/tests/test_ego_graph.py b/python/nx-cugraph/nx_cugraph/tests/test_ego_graph.py
index 0697a744e85..f3d0a8d3767 100644
--- a/python/nx-cugraph/nx_cugraph/tests/test_ego_graph.py
+++ b/python/nx-cugraph/nx_cugraph/tests/test_ego_graph.py
@@ -78,7 +78,7 @@ def test_ego_graph_cycle_graph(
         nx.ego_graph(Gnx, n, **kwargs, backend="cugraph")
     with pytest.raises(NotImplementedError, match="ego_graph"):
         nx.ego_graph(Gcg, n, **kwargs, backend="cugraph")
-    if _nxver < (3, 4):
+    if _nxver < (3, 4) or not nx.config.fallback_to_nx:
         with pytest.raises(NotImplementedError, match="ego_graph"):
             nx.ego_graph(Gcg, n, **kwargs)
     else:
@@ -86,7 +86,6 @@ def test_ego_graph_cycle_graph(
         # these arguments, so it falls back to networkx. Hence, as it is currently
         # implemented, the input graph is `nxcg.CudaGraph`, but the output graph
         # is `nx.Graph`. Should networkx convert back to "cugraph" backend?
-        # TODO: make fallback to networkx configurable.
         H2cg = nx.ego_graph(Gcg, n, **kwargs)
         assert type(H2nx) is type(H2cg)
         assert_graphs_equal(H2nx, nxcg.from_networkx(H2cg, preserve_all_attrs=True))