From bcd37684268a919f25aa5b9eb88f4e59aca1e7b4 Mon Sep 17 00:00:00 2001 From: "Hongzhi (Steve), Chen" Date: Mon, 7 Nov 2022 08:16:41 +0800 Subject: [PATCH] [Misc] Replace /*! with /**. (#4823) * replace * blabla * balbla * blabla Co-authored-by: Steve --- featgraph/include/featgraph.h | 2 +- featgraph/src/featgraph.cc | 2 +- featgraph/src/tvm_runtime_pack.cc | 4 +- include/dgl/array.h | 2 +- include/dgl/array_iterator.h | 2 +- include/dgl/aten/array_ops.h | 58 +-- include/dgl/aten/coo.h | 86 ++-- include/dgl/aten/csr.h | 409 +++++++++--------- include/dgl/aten/macro.h | 24 +- include/dgl/aten/spmat.h | 6 +- include/dgl/aten/types.h | 4 +- include/dgl/base_heterograph.h | 200 ++++----- include/dgl/bcast.h | 12 +- include/dgl/graph.h | 104 ++--- include/dgl/graph_interface.h | 112 ++--- include/dgl/graph_op.h | 28 +- include/dgl/graph_serializer.h | 2 +- include/dgl/graph_traversal.h | 20 +- include/dgl/immutable_graph.h | 144 +++--- include/dgl/kernel.h | 10 +- include/dgl/lazy.h | 14 +- include/dgl/nodeflow.h | 18 +- include/dgl/packed_func_ext.h | 4 +- include/dgl/random.h | 36 +- include/dgl/runtime/c_backend_api.h | 24 +- include/dgl/runtime/c_object_api.h | 14 +- include/dgl/runtime/c_runtime_api.h | 136 +++--- include/dgl/runtime/config.h | 2 +- include/dgl/runtime/container.h | 128 +++--- include/dgl/runtime/device_api.h | 54 +-- include/dgl/runtime/dlpack_convert.h | 14 +- include/dgl/runtime/module.h | 52 +-- include/dgl/runtime/ndarray.h | 128 +++--- include/dgl/runtime/object.h | 64 +-- include/dgl/runtime/packed_func.h | 98 ++--- include/dgl/runtime/parallel_for.h | 8 +- include/dgl/runtime/registry.h | 28 +- include/dgl/runtime/serializer.h | 2 +- include/dgl/runtime/shared_mem.h | 18 +- include/dgl/runtime/smart_ptr_serializer.h | 2 +- include/dgl/runtime/tensordispatch.h | 36 +- include/dgl/runtime/threading_backend.h | 14 +- include/dgl/runtime/util.h | 6 +- include/dgl/sampler.h | 6 +- include/dgl/sampling/negative.h | 4 +- 
include/dgl/sampling/neighbor.h | 6 +- include/dgl/sampling/randomwalks.h | 8 +- include/dgl/scheduler.h | 6 +- include/dgl/transform.h | 10 +- include/dgl/zerocopy_serializer.h | 24 +- include/intel/cpu_support.h | 4 +- include/intel/meta_utils.h | 2 +- src/api/api_container.cc | 2 +- src/api/api_test.cc | 2 +- src/array/arith.h | 2 +- src/array/array.cc | 4 +- src/array/array_arith.cc | 2 +- src/array/array_op.h | 2 +- src/array/check.h | 2 +- src/array/cpu/array_cumsum.cc | 2 +- src/array/cpu/array_index_select.cc | 2 +- src/array/cpu/array_nonzero.cc | 2 +- src/array/cpu/array_op_impl.cc | 2 +- src/array/cpu/array_pack.cc | 2 +- src/array/cpu/array_repeat.cc | 2 +- src/array/cpu/array_scatter.cc | 2 +- src/array/cpu/array_sort.cc | 2 +- src/array/cpu/array_utils.h | 6 +- src/array/cpu/coo_coalesce.cc | 2 +- src/array/cpu/coo_linegraph.cc | 2 +- src/array/cpu/coo_remove.cc | 6 +- src/array/cpu/coo_sort.cc | 2 +- src/array/cpu/csr_get_data.cc | 2 +- src/array/cpu/csr_mm.cc | 2 +- src/array/cpu/csr_remove.cc | 2 +- src/array/cpu/csr_sort.cc | 2 +- src/array/cpu/csr_sum.cc | 2 +- src/array/cpu/csr_to_simple.cc | 2 +- src/array/cpu/csr_union.cc | 2 +- src/array/cpu/gather_mm.cc | 8 +- src/array/cpu/gather_mm.h | 4 +- src/array/cpu/negative_sampling.cc | 2 +- src/array/cpu/rowwise_pick.h | 2 +- src/array/cpu/rowwise_sampling.cc | 2 +- src/array/cpu/rowwise_topk.cc | 2 +- src/array/cpu/sddmm.cc | 10 +- src/array/cpu/sddmm.h | 6 +- src/array/cpu/segment_reduce.cc | 10 +- src/array/cpu/segment_reduce.h | 12 +- src/array/cpu/spmat_op_impl_coo.cc | 6 +- src/array/cpu/spmat_op_impl_csr.cc | 2 +- src/array/cpu/spmm.cc | 12 +- src/array/cpu/spmm.h | 20 +- src/array/cpu/spmm_binary_ops.h | 2 +- src/array/cpu/spmm_blocking_libxsmm.h | 18 +- src/array/cpu/traversal.cc | 2 +- src/array/cpu/traversal.h | 12 +- src/array/cuda/array_cumsum.cu | 2 +- src/array/cuda/array_index_select.cu | 2 +- src/array/cuda/array_index_select.cuh | 2 +- src/array/cuda/array_nonzero.cu | 2 +- 
src/array/cuda/array_op_impl.cu | 2 +- src/array/cuda/array_scatter.cu | 2 +- src/array/cuda/array_sort.cu | 2 +- src/array/cuda/atomic.cuh | 2 +- src/array/cuda/coo2csr.cu | 4 +- src/array/cuda/coo_sort.cu | 2 +- src/array/cuda/csr2coo.cu | 4 +- src/array/cuda/csr_get_data.cu | 2 +- src/array/cuda/csr_mm.cu | 6 +- src/array/cuda/csr_sort.cu | 4 +- src/array/cuda/csr_sum.cu | 4 +- src/array/cuda/csr_transpose.cc | 2 +- src/array/cuda/cuda_filter.cu | 2 +- src/array/cuda/cusparse_dispatcher.cuh | 6 +- src/array/cuda/dgl_cub.cuh | 2 +- src/array/cuda/fp16.cuh | 2 +- src/array/cuda/functor.cuh | 2 +- src/array/cuda/gather_mm.cu | 10 +- src/array/cuda/ge_spmm.cuh | 4 +- src/array/cuda/macro.cuh | 2 +- src/array/cuda/negative_sampling.cu | 4 +- src/array/cuda/rowwise_sampling.cu | 2 +- src/array/cuda/rowwise_sampling_prob.cu | 2 +- src/array/cuda/sddmm.cu | 6 +- src/array/cuda/sddmm.cuh | 12 +- src/array/cuda/sddmm_hetero_coo.cu | 4 +- src/array/cuda/sddmm_hetero_csr.cu | 4 +- src/array/cuda/segment_reduce.cu | 2 +- src/array/cuda/segment_reduce.cuh | 18 +- src/array/cuda/spmat_op_impl_coo.cu | 2 +- src/array/cuda/spmat_op_impl_csr.cu | 12 +- src/array/cuda/spmm.cu | 6 +- src/array/cuda/spmm.cuh | 38 +- src/array/cuda/spmm_hetero.cu | 4 +- src/array/cuda/utils.cu | 2 +- src/array/cuda/utils.h | 20 +- src/array/cuda/uvm/array_index_select_uvm.cu | 2 +- src/array/cuda/uvm/array_index_select_uvm.cuh | 9 +- src/array/filter.cc | 2 +- src/array/filter.h | 2 +- src/array/kernel.cc | 32 +- src/array/kernel_decl.h | 38 +- src/array/libra_partition.cc | 12 +- src/array/selector.h | 4 +- src/array/union_partition.cc | 2 +- src/array/uvm_array.cc | 2 +- src/array/uvm_array_op.h | 2 +- src/bcast.cc | 6 +- src/c_api_common.cc | 2 +- src/c_api_common.h | 6 +- src/geometry/cpu/geometry_op_impl.cc | 8 +- src/geometry/cuda/edge_coarsening_impl.cu | 8 +- src/geometry/cuda/geometry_op_impl.cu | 4 +- src/geometry/geometry.cc | 2 +- src/geometry/geometry_op.h | 6 +- src/graph/creators.cc | 
2 +- src/graph/gk_ops.cc | 6 +- src/graph/graph.cc | 2 +- src/graph/graph_apis.cc | 2 +- src/graph/graph_op.cc | 2 +- src/graph/graph_traversal.cc | 2 +- src/graph/heterograph.cc | 2 +- src/graph/heterograph.h | 34 +- src/graph/heterograph_capi.cc | 2 +- src/graph/immutable_graph.cc | 16 +- src/graph/metis_partition.cc | 2 +- src/graph/network.cc | 2 +- src/graph/network.h | 72 +-- src/graph/nodeflow.cc | 2 +- src/graph/pickle.cc | 2 +- src/graph/sampler.cc | 26 +- src/graph/sampling/negative/global_uniform.cc | 2 +- src/graph/sampling/neighbor/neighbor.cc | 2 +- .../sampling/randomwalks/frequency_hashmap.cu | 2 +- .../randomwalks/frequency_hashmap.cuh | 2 +- .../randomwalks/get_node_types_cpu.cc | 2 +- .../randomwalks/get_node_types_gpu.cu | 2 +- .../randomwalks/metapath_randomwalk.h | 8 +- src/graph/sampling/randomwalks/node2vec.cc | 2 +- .../sampling/randomwalks/node2vec_cpu.cc | 2 +- .../sampling/randomwalks/node2vec_impl.h | 4 +- .../randomwalks/node2vec_randomwalk.h | 4 +- .../sampling/randomwalks/randomwalk_cpu.cc | 2 +- .../sampling/randomwalks/randomwalk_gpu.cu | 2 +- .../randomwalk_with_restart_cpu.cc | 2 +- src/graph/sampling/randomwalks/randomwalks.cc | 2 +- .../sampling/randomwalks/randomwalks_cpu.h | 4 +- .../sampling/randomwalks/randomwalks_impl.h | 12 +- src/graph/serialize/dglgraph_data.h | 4 +- src/graph/serialize/dglgraph_serialize.cc | 2 +- src/graph/serialize/graph_serialize.cc | 2 +- src/graph/serialize/graph_serialize.h | 4 +- src/graph/serialize/heterograph_data.h | 6 +- src/graph/serialize/heterograph_serialize.cc | 2 +- src/graph/serialize/tensor_serialize.cc | 2 +- src/graph/serialize/zerocopy_serializer.cc | 2 +- src/graph/shared_mem_manager.cc | 2 +- src/graph/shared_mem_manager.h | 2 +- src/graph/subgraph.cc | 2 +- src/graph/transform/compact.cc | 2 +- src/graph/transform/compact.h | 2 +- .../transform/cpu/kdtree_ndarray_adapter.h | 16 +- src/graph/transform/cpu/knn.cc | 16 +- .../transform/cuda/cuda_compact_graph.cu | 2 +- 
src/graph/transform/cuda/cuda_map_edges.cuh | 2 +- src/graph/transform/cuda/cuda_to_block.cu | 2 +- src/graph/transform/cuda/knn.cu | 28 +- src/graph/transform/knn.cc | 2 +- src/graph/transform/knn.h | 6 +- src/graph/transform/line_graph.cc | 4 +- src/graph/transform/metis_partition_hetero.cc | 2 +- src/graph/transform/partition_hetero.cc | 2 +- src/graph/transform/remove_edges.cc | 2 +- src/graph/transform/to_bipartite.cc | 2 +- src/graph/transform/to_bipartite.h | 2 +- src/graph/transform/to_simple.cc | 2 +- src/graph/transform/union_partition.cc | 2 +- src/graph/traversal.cc | 10 +- src/graph/traversal.h | 12 +- src/graph/unit_graph.cc | 26 +- src/graph/unit_graph.h | 66 +-- src/partition/cuda/partition_op.cu | 2 +- src/partition/ndarray_partition.cc | 2 +- src/partition/ndarray_partition.h | 2 +- src/partition/partition_op.h | 2 +- src/random/cpu/choice.cc | 2 +- src/random/cpu/sample_utils.h | 12 +- src/random/random.cc | 2 +- src/rpc/net_type.h | 16 +- src/rpc/network/common.cc | 2 +- src/rpc/network/common.h | 2 +- src/rpc/network/communicator.h | 24 +- src/rpc/network/msg_queue.cc | 2 +- src/rpc/network/msg_queue.h | 54 +-- src/rpc/network/socket_communicator.cc | 2 +- src/rpc/network/socket_communicator.h | 60 +-- src/rpc/network/socket_pool.cc | 2 +- src/rpc/network/socket_pool.h | 28 +- src/rpc/network/tcp_socket.cc | 2 +- src/rpc/network/tcp_socket.h | 32 +- src/rpc/rpc.cc | 4 +- src/rpc/rpc.h | 38 +- src/rpc/rpc_msg.h | 20 +- src/rpc/server_state.h | 12 +- src/rpc/tensorpipe/queue.h | 2 +- src/rpc/tensorpipe/tp_communicator.cc | 2 +- src/rpc/tensorpipe/tp_communicator.h | 54 +-- src/runtime/c_object_api.cc | 12 +- src/runtime/c_runtime_api.cc | 4 +- src/runtime/config.cc | 2 +- src/runtime/cpu_device_api.cc | 2 +- src/runtime/cuda/cuda_common.h | 20 +- src/runtime/cuda/cuda_device_api.cc | 6 +- src/runtime/cuda/cuda_hashtable.cu | 2 +- src/runtime/cuda/cuda_hashtable.cuh | 6 +- src/runtime/cuda/nccl_api.cu | 2 +- src/runtime/cuda/nccl_api.h | 2 +- 
src/runtime/dlpack_convert.cc | 2 +- src/runtime/dso_module.cc | 2 +- src/runtime/file_util.cc | 2 +- src/runtime/file_util.h | 18 +- src/runtime/meta_data.h | 4 +- src/runtime/module.cc | 2 +- src/runtime/module_util.cc | 2 +- src/runtime/module_util.h | 8 +- src/runtime/ndarray.cc | 2 +- src/runtime/object.cc | 2 +- src/runtime/pack_args.h | 14 +- src/runtime/parallel_for.cpp | 2 +- src/runtime/registry.cc | 10 +- src/runtime/resource_manager.cc | 4 +- src/runtime/resource_manager.h | 4 +- src/runtime/runtime_base.h | 10 +- src/runtime/semaphore_wrapper.cc | 2 +- src/runtime/semaphore_wrapper.h | 12 +- src/runtime/shared_mem.cc | 4 +- src/runtime/system_lib_module.cc | 2 +- src/runtime/tensordispatch.cc | 2 +- src/runtime/thread_pool.cc | 16 +- src/runtime/thread_storage_scope.h | 44 +- src/runtime/threading_backend.cc | 2 +- src/runtime/utils.cc | 2 +- src/runtime/workspace.h | 2 +- src/runtime/workspace_pool.cc | 8 +- src/runtime/workspace_pool.h | 18 +- src/scheduler/scheduler.cc | 2 +- src/scheduler/scheduler_apis.cc | 2 +- tensoradapter/include/tensoradapter.h | 14 +- tensoradapter/include/tensoradapter_exports.h | 2 +- tensoradapter/pytorch/torch.cpp | 2 +- tests/cpp/graph_index_test.cc | 2 +- tests/cpp/message_queue_test.cc | 2 +- tests/cpp/socket_communicator_test.cc | 2 +- tests/cpp/string_test.cc | 2 +- tests/cpp/test_aten.cc | 18 +- tests/cpp/test_unit_graph.cc | 8 +- 297 files changed, 1964 insertions(+), 1964 deletions(-) diff --git a/featgraph/include/featgraph.h b/featgraph/include/featgraph.h index 35b5ec625bfa..7ac1a4d46af1 100644 --- a/featgraph/include/featgraph.h +++ b/featgraph/include/featgraph.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file featgraph/include/featgraph.h * @brief FeatGraph kernel headers. diff --git a/featgraph/src/featgraph.cc b/featgraph/src/featgraph.cc index e830ad186425..6577c7e680e1 100644 --- a/featgraph/src/featgraph.cc +++ b/featgraph/src/featgraph.cc @@ -1,4 +1,4 @@ -/*! 
+/** * Copyright (c) 2020 by Contributors * @file featgraph/src/featgraph.cc * @brief FeatGraph kernels. diff --git a/featgraph/src/tvm_runtime_pack.cc b/featgraph/src/tvm_runtime_pack.cc index 14c7a1ab7076..c0c5355a5eca 100644 --- a/featgraph/src/tvm_runtime_pack.cc +++ b/featgraph/src/tvm_runtime_pack.cc @@ -1,4 +1,4 @@ -/* +/** * NOTE(zihao): this file was modified from TVM project: * - * https://github.com/apache/tvm/blob/9713d675c64ae3075e10be5acadeef1328a44bb5/apps/howto_deploy/tvm_runtime_pack.cc @@ -21,7 +21,7 @@ * under the License. */ -/*! +/** * @brief This is an all in one TVM runtime file. * * You only have to use this file to compile libtvm_runtime to diff --git a/include/dgl/array.h b/include/dgl/array.h index bc29361b362c..decb6f098626 100644 --- a/include/dgl/array.h +++ b/include/dgl/array.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file dgl/array.h * @brief Common array operations required by DGL. diff --git a/include/dgl/array_iterator.h b/include/dgl/array_iterator.h index e932eac90230..7986ed08ce99 100644 --- a/include/dgl/array_iterator.h +++ b/include/dgl/array_iterator.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file dgl/array_iterator.h * @brief Various iterators. diff --git a/include/dgl/aten/array_ops.h b/include/dgl/aten/array_ops.h index 865bb679b0e4..124d80e49b0a 100644 --- a/include/dgl/aten/array_ops.h +++ b/include/dgl/aten/array_ops.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file dgl/aten/array_ops.h * @brief Common array operations required by DGL. @@ -23,20 +23,20 @@ namespace aten { // ID array ////////////////////////////////////////////////////////////////////// -/*! @return A special array to represent null. */ +/** @return A special array to represent null. */ inline NDArray NullArray(const DGLDataType& dtype = DGLDataType{kDGLInt, 64, 1}, const DGLContext& ctx = DGLContext{kDGLCPU, 0}) { return NDArray::Empty({0}, dtype, ctx); } -/*! 
+/** * @return Whether the input array is a null array. */ inline bool IsNullArray(NDArray array) { return array->shape[0] == 0; } -/*! +/** * @brief Create a new id array with given length * @param length The array length * @param ctx The array context @@ -47,7 +47,7 @@ IdArray NewIdArray(int64_t length, DGLContext ctx = DGLContext{kDGLCPU, 0}, uint8_t nbits = 64); -/*! +/** * @brief Create a new id array using the given vector data * @param vec The vector data * @param nbits The integer bits of the returned array @@ -59,7 +59,7 @@ IdArray VecToIdArray(const std::vector& vec, uint8_t nbits = 64, DGLContext ctx = DGLContext{kDGLCPU, 0}); -/*! +/** * @brief Return an array representing a 1D range. * @param low Lower bound (inclusive). * @param high Higher bound (exclusive). @@ -69,7 +69,7 @@ IdArray VecToIdArray(const std::vector& vec, */ IdArray Range(int64_t low, int64_t high, uint8_t nbits, DGLContext ctx); -/*! +/** * @brief Return an array full of the given value * @param val The value to fill. * @param length Number of elements. @@ -79,7 +79,7 @@ IdArray Range(int64_t low, int64_t high, uint8_t nbits, DGLContext ctx); */ IdArray Full(int64_t val, int64_t length, uint8_t nbits, DGLContext ctx); -/*! +/** * @brief Return an array full of the given value with the given type. * @param val The value to fill. * @param length Number of elements. @@ -89,13 +89,13 @@ IdArray Full(int64_t val, int64_t length, uint8_t nbits, DGLContext ctx); template NDArray Full(DType val, int64_t length, DGLContext ctx); -/*! @brief Create a deep copy of the given array */ +/** @brief Create a deep copy of the given array */ IdArray Clone(IdArray arr); -/*! @brief Convert the idarray to the given bit width */ +/** @brief Convert the idarray to the given bit width */ IdArray AsNumBits(IdArray arr, uint8_t bits); -/*! 
@brief Arithmetic functions */ +/** @brief Arithmetic functions */ IdArray Add(IdArray lhs, IdArray rhs); IdArray Sub(IdArray lhs, IdArray rhs); IdArray Mul(IdArray lhs, IdArray rhs); @@ -138,30 +138,30 @@ IdArray LE(int64_t lhs, IdArray rhs); IdArray EQ(int64_t lhs, IdArray rhs); IdArray NE(int64_t lhs, IdArray rhs); -/*! @brief Stack two arrays (of len L) into a 2*L length array */ +/** @brief Stack two arrays (of len L) into a 2*L length array */ IdArray HStack(IdArray arr1, IdArray arr2); -/*! @brief Return the indices of the elements that are non-zero. */ +/** @brief Return the indices of the elements that are non-zero. */ IdArray NonZero(BoolArray bool_arr); -/*! +/** * @brief Return the data under the index. In numpy notation, A[I] * @tparam ValueType The type of return value. */ template ValueType IndexSelect(NDArray array, int64_t index); -/*! +/** * @brief Return the data under the index. In numpy notation, A[I] */ NDArray IndexSelect(NDArray array, IdArray index); -/*! +/** * @brief Return the data from `start` (inclusive) to `end` (exclusive). */ NDArray IndexSelect(NDArray array, int64_t start, int64_t end); -/*! +/** * @brief Permute the elements of an array according to given indices. * * Only support 1D arrays. @@ -175,7 +175,7 @@ NDArray IndexSelect(NDArray array, int64_t start, int64_t end); */ NDArray Scatter(NDArray array, IdArray indices); -/*! +/** * @brief Scatter data into the output array. * * Equivalent to: @@ -186,7 +186,7 @@ NDArray Scatter(NDArray array, IdArray indices); */ void Scatter_(IdArray index, NDArray value, NDArray out); -/*! +/** * @brief Repeat each element a number of times. Equivalent to np.repeat(array, repeats) * @param array A 1D vector * @param repeats A 1D integer vector for number of times to repeat for each element in @@ -194,7 +194,7 @@ void Scatter_(IdArray index, NDArray value, NDArray out); */ NDArray Repeat(NDArray array, IdArray repeats); -/*! +/** * @brief Relabel the given ids to consecutive ids. 
* * Relabeling is done inplace. The mapping is created from the union @@ -211,7 +211,7 @@ NDArray Repeat(NDArray array, IdArray repeats); */ IdArray Relabel_(const std::vector& arrays); -/*! +/** * @brief concatenate the given id arrays to one array * * Example: @@ -224,12 +224,12 @@ IdArray Relabel_(const std::vector& arrays); */ NDArray Concat(const std::vector& arrays); -/*!\brief Return whether the array is a valid 1D int array*/ +/** @brief Return whether the array is a valid 1D int array*/ inline bool IsValidIdArray(const dgl::runtime::NDArray& arr) { return arr->ndim == 1 && arr->dtype.code == kDGLInt; } -/*! +/** * @brief Packs a tensor containing padded sequences of variable length. * * Similar to \c pack_padded_sequence in PyTorch, except that @@ -261,7 +261,7 @@ inline bool IsValidIdArray(const dgl::runtime::NDArray& arr) { template std::tuple Pack(NDArray array, ValueType pad_value); -/*! +/** * @brief Batch-slice a 1D or 2D array, and then pack the list of sliced arrays * by concatenation. * @@ -291,7 +291,7 @@ std::tuple Pack(NDArray array, ValueType pad_value); */ std::pair ConcatSlices(NDArray array, IdArray lengths); -/*! +/** * @brief Return the cumulative summation (or inclusive sum) of the input array. * * The first element out[0] is equal to the first element of the input array @@ -307,7 +307,7 @@ std::pair ConcatSlices(NDArray array, IdArray lengths); */ IdArray CumSum(IdArray array, bool prepend_zero = false); -/*! +/** * @brief Return the nonzero index. * * Only support 1D array. The result index array is in int64. @@ -317,7 +317,7 @@ IdArray CumSum(IdArray array, bool prepend_zero = false); */ IdArray NonZero(NDArray array); -/*! +/** * @brief Sort the ID vector in ascending order. * * It performs both sort and arg_sort (returning the sorted index). The sorted index @@ -334,7 +334,7 @@ IdArray NonZero(NDArray array); */ std::pair Sort(IdArray array, int num_bits = 0); -/*! 
+/** * @brief Return a string that prints out some debug information. */ std::string ToDebugString(NDArray array); @@ -355,7 +355,7 @@ IdArray VecToIdArray(const std::vector& vec, return ret.CopyTo(ctx); } -/*! +/** * @brief Get the context of the first array, and check if the non-null arrays' * contexts are the same. */ diff --git a/include/dgl/aten/coo.h b/include/dgl/aten/coo.h index 1117b411d3e0..1eef025b2791 100644 --- a/include/dgl/aten/coo.h +++ b/include/dgl/aten/coo.h @@ -1,5 +1,5 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file dgl/aten/coo.h * @brief Common COO operations required by DGL. @@ -23,7 +23,7 @@ namespace aten { struct CSRMatrix; -/*! +/** * @brief Plain COO structure * * The data array stores integer ids for reading edge features. @@ -37,21 +37,21 @@ constexpr uint64_t kDGLSerialize_AtenCooMatrixMagic = 0xDD61ffd305dff127; // TODO(BarclayII): Graph queries on COO formats should support the case where // data ordered by rows/columns instead of EID. struct COOMatrix { - /*! @brief the dense shape of the matrix */ + /** @brief the dense shape of the matrix */ int64_t num_rows = 0, num_cols = 0; - /*! @brief COO index arrays */ + /** @brief COO index arrays */ IdArray row, col; - /*! @brief data index array. When is null, assume it is from 0 to NNZ - 1. */ + /** @brief data index array. When is null, assume it is from 0 to NNZ - 1. */ IdArray data; - /*! @brief whether the row indices are sorted */ + /** @brief whether the row indices are sorted */ bool row_sorted = false; - /*! @brief whether the column indices per row are sorted */ + /** @brief whether the column indices per row are sorted */ bool col_sorted = false; - /*! @brief whether the matrix is in pinned memory */ + /** @brief whether the matrix is in pinned memory */ bool is_pinned = false; - /*! @brief default constructor */ + /** @brief default constructor */ COOMatrix() = default; - /*! 
@brief constructor */ + /** @brief constructor */ COOMatrix(int64_t nrows, int64_t ncols, IdArray rarr, IdArray carr, IdArray darr = NullArray(), bool rsorted = false, bool csorted = false) @@ -65,7 +65,7 @@ struct COOMatrix { CheckValidity(); } - /*! @brief constructor from SparseMatrix object */ + /** @brief constructor from SparseMatrix object */ explicit COOMatrix(const SparseMatrix& spmat) : num_rows(spmat.num_rows), num_cols(spmat.num_cols), @@ -121,7 +121,7 @@ struct COOMatrix { CHECK_NO_OVERFLOW(row->dtype, num_cols); } - /*! @brief Return a copy of this matrix on the give device context. */ + /** @brief Return a copy of this matrix on the give device context. */ inline COOMatrix CopyTo(const DGLContext &ctx) const { if (ctx == row->ctx) return *this; @@ -130,7 +130,7 @@ struct COOMatrix { row_sorted, col_sorted); } - /*! + /** * @brief Pin the row, col and data (if not Null) of the matrix. * @note This is an in-place method. Behavior depends on the current context, * kDGLCPU: will be pinned; @@ -149,7 +149,7 @@ struct COOMatrix { is_pinned = true; } - /*! + /** * @brief Unpin the row, col and data (if not Null) of the matrix. * @note This is an in-place method. Behavior depends on the current context, * IsPinned: will be unpinned; @@ -167,7 +167,7 @@ struct COOMatrix { is_pinned = false; } - /*! + /** * @brief Record stream for the row, col and data (if not Null) of the matrix. * @param stream The stream that is using the graph */ @@ -182,28 +182,28 @@ struct COOMatrix { ///////////////////////// COO routines ////////////////////////// -/*! @brief Return true if the value (row, col) is non-zero */ +/** @brief Return true if the value (row, col) is non-zero */ bool COOIsNonZero(COOMatrix , int64_t row, int64_t col); -/*! +/** * @brief Batched implementation of COOIsNonZero. * @note This operator allows broadcasting (i.e, either row or col can be of length 1). */ runtime::NDArray COOIsNonZero(COOMatrix , runtime::NDArray row, runtime::NDArray col); -/*! 
@brief Return the nnz of the given row */ +/** @brief Return the nnz of the given row */ int64_t COOGetRowNNZ(COOMatrix , int64_t row); runtime::NDArray COOGetRowNNZ(COOMatrix , runtime::NDArray row); -/*! @brief Return the data array of the given row */ +/** @brief Return the data array of the given row */ std::pair COOGetRowDataAndIndices(COOMatrix , int64_t row); -/*! @brief Whether the COO matrix contains data */ +/** @brief Whether the COO matrix contains data */ inline bool COOHasData(COOMatrix csr) { return !IsNullArray(csr.data); } -/*! +/** * @brief Check whether the COO is sorted. * * It returns two flags: one for whether the row is sorted; @@ -214,7 +214,7 @@ inline bool COOHasData(COOMatrix csr) { */ std::pair COOIsSorted(COOMatrix coo); -/*! +/** * @brief Get the data and the row,col indices for each returned entries. * * The operator supports matrix with duplicate entries and all the matched entries @@ -230,7 +230,7 @@ std::pair COOIsSorted(COOMatrix coo); std::vector COOGetDataAndIndices( COOMatrix mat, runtime::NDArray rows, runtime::NDArray cols); -/*! @brief Get data. The return type is an ndarray due to possible duplicate entries. */ +/** @brief Get data. The return type is an ndarray due to possible duplicate entries. */ inline runtime::NDArray COOGetAllData(COOMatrix mat, int64_t row, int64_t col) { IdArray rows = VecToIdArray({row}, mat.row->dtype.bits, mat.row->ctx); IdArray cols = VecToIdArray({col}, mat.row->dtype.bits, mat.row->ctx); @@ -238,7 +238,7 @@ inline runtime::NDArray COOGetAllData(COOMatrix mat, int64_t row, int64_t col) { return rst[2]; } -/*! +/** * @brief Get the data for each (row, col) pair. * * The operator supports matrix with duplicate entries but only one matched entry @@ -254,10 +254,10 @@ inline runtime::NDArray COOGetAllData(COOMatrix mat, int64_t row, int64_t col) { */ runtime::NDArray COOGetData(COOMatrix mat, runtime::NDArray rows, runtime::NDArray cols); -/*! 
@brief Return a transposed COO matrix */ +/** @brief Return a transposed COO matrix */ COOMatrix COOTranspose(COOMatrix coo); -/*! +/** * @brief Convert COO matrix to CSR matrix. * * If the input COO matrix does not have data array, the data array of @@ -281,7 +281,7 @@ COOMatrix COOTranspose(COOMatrix coo); */ CSRMatrix COOToCSR(COOMatrix coo); -/*! +/** * @brief Slice rows of the given matrix and return. * @param coo COO matrix * @param start Start row id (inclusive) @@ -290,7 +290,7 @@ CSRMatrix COOToCSR(COOMatrix coo); COOMatrix COOSliceRows(COOMatrix coo, int64_t start, int64_t end); COOMatrix COOSliceRows(COOMatrix coo, runtime::NDArray rows); -/*! +/** * @brief Get the submatrix specified by the row and col ids. * * In numpy notation, given matrix M, row index array I, col index array J @@ -303,16 +303,16 @@ COOMatrix COOSliceRows(COOMatrix coo, runtime::NDArray rows); */ COOMatrix COOSliceMatrix(COOMatrix coo, runtime::NDArray rows, runtime::NDArray cols); -/*! @return True if the matrix has duplicate entries */ +/** @return True if the matrix has duplicate entries */ bool COOHasDuplicate(COOMatrix coo); -/*! +/** * @brief Deduplicate the entries of a sorted COO matrix, replacing the data with the * number of occurrences of the row-col coordinates. */ std::pair COOCoalesce(COOMatrix coo); -/*! +/** * @brief Sort the indices of a COO matrix in-place. * * The function sorts row indices in ascending order. If sort_column is true, @@ -327,7 +327,7 @@ std::pair COOCoalesce(COOMatrix coo); */ void COOSort_(COOMatrix* mat, bool sort_column = false); -/*! +/** * @brief Sort the indices of a COO matrix. * * The function sorts row indices in ascending order. If sort_column is true, @@ -352,14 +352,14 @@ inline COOMatrix COOSort(COOMatrix mat, bool sort_column = false) { return ret; } -/*! +/** * @brief Remove entries from COO matrix by entry indices (data indices) * @return A new COO matrix as well as a mapping from the new COO entries to the old COO * entries. 
*/ COOMatrix COORemove(COOMatrix coo, IdArray entries); -/*! +/** * @brief Reorder the rows and colmns according to the new row and column order. * @param csr The input coo matrix. * @param new_row_ids the new row Ids (the index is the old row Id) @@ -367,7 +367,7 @@ COOMatrix COORemove(COOMatrix coo, IdArray entries); */ COOMatrix COOReorder(COOMatrix coo, runtime::NDArray new_row_ids, runtime::NDArray new_col_ids); -/*! +/** * @brief Randomly select a fixed number of non-zero entries along each given row independently. * * The function performs random choices along each row independently. @@ -410,7 +410,7 @@ COOMatrix COORowWiseSampling( NDArray prob_or_mask = NDArray(), bool replace = true); -/*! +/** * @brief Randomly select a fixed number of non-zero entries for each edge type * along each given row independently. * @@ -462,7 +462,7 @@ COOMatrix COORowWisePerEtypeSampling( const std::vector& prob_or_mask, bool replace = true); -/*! +/** * @brief Select K non-zero entries with the largest weights along each given row. * * The function performs top-k selection along each row independently. @@ -506,7 +506,7 @@ COOMatrix COORowWiseTopk( NDArray weight, bool ascending = false); -/*! +/** * @brief Union two COOMatrix into one COOMatrix. * * Two Matrix must have the same shape. @@ -538,7 +538,7 @@ COOMatrix COORowWiseTopk( COOMatrix UnionCoo( const std::vector& coos); -/*! +/** * @brief DisjointUnion a list COOMatrix into one COOMatrix. * * Examples: @@ -573,7 +573,7 @@ COOMatrix UnionCoo( COOMatrix DisjointUnionCoo( const std::vector& coos); -/*! +/** * @brief COOMatrix toSimple. * * A = [[0, 0, 0], @@ -597,7 +597,7 @@ COOMatrix DisjointUnionCoo( */ std::tuple COOToSimple(const COOMatrix& coo); -/*! +/** * @brief Split a COOMatrix into multiple disjoin components. * * Examples: @@ -648,7 +648,7 @@ std::vector DisjointPartitionCooBySizes( const std::vector &src_vertex_cumsum, const std::vector &dst_vertex_cumsum); -/*! 
+/** * @brief Slice a contiguous chunk from a COOMatrix * * Examples: @@ -689,7 +689,7 @@ COOMatrix COOSliceContiguousChunk( const std::vector &src_vertex_range, const std::vector &dst_vertex_range); -/*! +/** * @brief Create a LineGraph of input coo * * A = [[0, 0, 1], diff --git a/include/dgl/aten/csr.h b/include/dgl/aten/csr.h index 56561f1c0208..46a1751a0f7d 100644 --- a/include/dgl/aten/csr.h +++ b/include/dgl/aten/csr.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file dgl/aten/csr.h * @brief Common CSR operations required by DGL. @@ -8,25 +8,27 @@ #include #include -#include -#include + #include +#include #include -#include "./types.h" +#include + #include "./array_ops.h" -#include "./spmat.h" #include "./macro.h" +#include "./spmat.h" +#include "./types.h" namespace dgl { namespace aten { struct COOMatrix; -/*! +/** * @brief Plain CSR matrix * - * The column indices are 0-based and are not necessarily sorted. The data array stores - * integer ids for reading edge features. + * The column indices are 0-based and are not necessarily sorted. The data array + * stores integer ids for reading edge features. * * Note that we do allow duplicate non-zero entries -- multiple non-zero entries * that have the same row, col indices. It corresponds to multigraph in @@ -36,21 +38,22 @@ struct COOMatrix; constexpr uint64_t kDGLSerialize_AtenCsrMatrixMagic = 0xDD6cd31205dff127; struct CSRMatrix { - /*! @brief the dense shape of the matrix */ + /** @brief the dense shape of the matrix */ int64_t num_rows = 0, num_cols = 0; - /*! @brief CSR index arrays */ + /** @brief CSR index arrays */ IdArray indptr, indices; - /*! @brief data index array. When is null, assume it is from 0 to NNZ - 1. */ + /** @brief data index array. When is null, assume it is from 0 to NNZ - 1. */ IdArray data; - /*! @brief whether the column indices per row are sorted */ + /** @brief whether the column indices per row are sorted */ bool sorted = false; - /*! 
@brief whether the matrix is in pinned memory */ + /** @brief whether the matrix is in pinned memory */ bool is_pinned = false; - /*! @brief default constructor */ + /** @brief default constructor */ CSRMatrix() = default; - /*! @brief constructor */ - CSRMatrix(int64_t nrows, int64_t ncols, IdArray parr, IdArray iarr, - IdArray darr = NullArray(), bool sorted_flag = false) + /** @brief constructor */ + CSRMatrix( + int64_t nrows, int64_t ncols, IdArray parr, IdArray iarr, + IdArray darr = NullArray(), bool sorted_flag = false) : num_rows(nrows), num_cols(ncols), indptr(parr), @@ -60,7 +63,7 @@ struct CSRMatrix { CheckValidity(); } - /*! @brief constructor from SparseMatrix object */ + /** @brief constructor from SparseMatrix object */ explicit CSRMatrix(const SparseMatrix& spmat) : num_rows(spmat.num_rows), num_cols(spmat.num_cols), @@ -73,8 +76,9 @@ struct CSRMatrix { // Convert to a SparseMatrix object that can return to python. SparseMatrix ToSparseMatrix() const { - return SparseMatrix(static_cast(SparseFormat::kCSR), num_rows, - num_cols, {indptr, indices, data}, {sorted}); + return SparseMatrix( + static_cast(SparseFormat::kCSR), num_rows, num_cols, + {indptr, indices, data}, {sorted}); } bool Load(dmlc::Stream* fs) { @@ -114,25 +118,24 @@ struct CSRMatrix { CHECK_EQ(indptr->shape[0], num_rows + 1); } - /*! @brief Return a copy of this matrix on the give device context. */ - inline CSRMatrix CopyTo(const DGLContext &ctx) const { - if (ctx == indptr->ctx) - return *this; - return CSRMatrix(num_rows, num_cols, indptr.CopyTo(ctx), indices.CopyTo(ctx), - aten::IsNullArray(data) ? data : data.CopyTo(ctx), sorted); + /** @brief Return a copy of this matrix on the give device context. */ + inline CSRMatrix CopyTo(const DGLContext& ctx) const { + if (ctx == indptr->ctx) return *this; + return CSRMatrix( + num_rows, num_cols, indptr.CopyTo(ctx), indices.CopyTo(ctx), + aten::IsNullArray(data) ? data : data.CopyTo(ctx), sorted); } - /*! 
- * @brief Pin the indptr, indices and data (if not Null) of the matrix. - * @note This is an in-place method. Behavior depends on the current context, - * kDGLCPU: will be pinned; - * IsPinned: directly return; - * kDGLCUDA: invalid, will throw an error. - * The context check is deferred to pinning the NDArray. - */ + /** + * @brief Pin the indptr, indices and data (if not Null) of the matrix. + * @note This is an in-place method. Behavior depends on the current context, + * kDGLCPU: will be pinned; + * IsPinned: directly return; + * kDGLCUDA: invalid, will throw an error. + * The context check is deferred to pinning the NDArray. + */ inline void PinMemory_() { - if (is_pinned) - return; + if (is_pinned) return; indptr.PinMemory_(); indices.PinMemory_(); if (!aten::IsNullArray(data)) { @@ -141,16 +144,15 @@ struct CSRMatrix { is_pinned = true; } - /*! - * @brief Unpin the indptr, indices and data (if not Null) of the matrix. - * @note This is an in-place method. Behavior depends on the current context, - * IsPinned: will be unpinned; - * others: directly return. - * The context check is deferred to unpinning the NDArray. - */ + /** + * @brief Unpin the indptr, indices and data (if not Null) of the matrix. + * @note This is an in-place method. Behavior depends on the current context, + * IsPinned: will be unpinned; + * others: directly return. + * The context check is deferred to unpinning the NDArray. + */ inline void UnpinMemory_() { - if (!is_pinned) - return; + if (!is_pinned) return; indptr.UnpinMemory_(); indices.UnpinMemory_(); if (!aten::IsNullArray(data)) { @@ -159,8 +161,9 @@ struct CSRMatrix { is_pinned = false; } - /*! - * @brief Record stream for the indptr, indices and data (if not Null) of the matrix. + /** + * @brief Record stream for the indptr, indices and data (if not Null) of the + * matrix. 
* @param stream The stream that is using the graph */ inline void RecordStream(DGLStreamHandle stream) const { @@ -174,52 +177,54 @@ struct CSRMatrix { ///////////////////////// CSR routines ////////////////////////// -/*! @brief Return true if the value (row, col) is non-zero */ -bool CSRIsNonZero(CSRMatrix , int64_t row, int64_t col); -/*! +/** @brief Return true if the value (row, col) is non-zero */ +bool CSRIsNonZero(CSRMatrix, int64_t row, int64_t col); +/** * @brief Batched implementation of CSRIsNonZero. - * @note This operator allows broadcasting (i.e, either row or col can be of length 1). + * @note This operator allows broadcasting (i.e, either row or col can be of + * length 1). */ -runtime::NDArray CSRIsNonZero(CSRMatrix, runtime::NDArray row, runtime::NDArray col); +runtime::NDArray CSRIsNonZero( + CSRMatrix, runtime::NDArray row, runtime::NDArray col); -/*! @brief Return the nnz of the given row */ -int64_t CSRGetRowNNZ(CSRMatrix , int64_t row); -runtime::NDArray CSRGetRowNNZ(CSRMatrix , runtime::NDArray row); +/** @brief Return the nnz of the given row */ +int64_t CSRGetRowNNZ(CSRMatrix, int64_t row); +runtime::NDArray CSRGetRowNNZ(CSRMatrix, runtime::NDArray row); -/*! @brief Return the column index array of the given row */ -runtime::NDArray CSRGetRowColumnIndices(CSRMatrix , int64_t row); +/** @brief Return the column index array of the given row */ +runtime::NDArray CSRGetRowColumnIndices(CSRMatrix, int64_t row); -/*! @brief Return the data array of the given row */ -runtime::NDArray CSRGetRowData(CSRMatrix , int64_t row); +/** @brief Return the data array of the given row */ +runtime::NDArray CSRGetRowData(CSRMatrix, int64_t row); -/*! @brief Whether the CSR matrix contains data */ -inline bool CSRHasData(CSRMatrix csr) { - return !IsNullArray(csr.data); -} +/** @brief Whether the CSR matrix contains data */ +inline bool CSRHasData(CSRMatrix csr) { return !IsNullArray(csr.data); } -/*! @brief Whether the column indices of each row is sorted. 
*/ +/** @brief Whether the column indices of each row is sorted. */ bool CSRIsSorted(CSRMatrix csr); -/*! +/** * @brief Get the data and the row,col indices for each returned entries. * - * The operator supports matrix with duplicate entries and all the matched entries - * will be returned. The operator assumes there is NO duplicate (row, col) pair - * in the given input. Otherwise, the returned result is undefined. + * The operator supports matrix with duplicate entries and all the matched + * entries will be returned. The operator assumes there is NO duplicate (row, + * col) pair in the given input. Otherwise, the returned result is undefined. * * If some (row, col) pairs do not contain a valid non-zero elements, * they will not be included in the return arrays. * - * @note This operator allows broadcasting (i.e, either row or col can be of length 1). + * @note This operator allows broadcasting (i.e, either row or col can be of + * length 1). * @param mat Sparse matrix * @param rows Row index * @param cols Column index * @return Three arrays {rows, cols, data} */ std::vector CSRGetDataAndIndices( - CSRMatrix , runtime::NDArray rows, runtime::NDArray cols); + CSRMatrix, runtime::NDArray rows, runtime::NDArray cols); -/* @brief Get data. The return type is an ndarray due to possible duplicate entries. */ +/* @brief Get data. The return type is an ndarray due to possible duplicate + * entries. */ inline runtime::NDArray CSRGetAllData(CSRMatrix mat, int64_t row, int64_t col) { const auto& nbits = mat.indptr->dtype.bits; const auto& ctx = mat.indptr->ctx; @@ -229,54 +234,60 @@ inline runtime::NDArray CSRGetAllData(CSRMatrix mat, int64_t row, int64_t col) { return rst[2]; } -/*! +/** * @brief Get the data for each (row, col) pair. * - * The operator supports matrix with duplicate entries but only one matched entry - * will be returned for each (row, col) pair. Support duplicate input (row, col) - * pairs. 
+ * The operator supports matrix with duplicate entries but only one matched + * entry will be returned for each (row, col) pair. Support duplicate input + * (row, col) pairs. * * If some (row, col) pairs do not contain a valid non-zero elements, * their data values are filled with -1. * - * @note This operator allows broadcasting (i.e, either row or col can be of length 1). + * @note This operator allows broadcasting (i.e, either row or col can be of + * length 1). * * @param mat Sparse matrix. * @param rows Row index. * @param cols Column index. * @return Data array. The i^th element is the data of (rows[i], cols[i]) */ -runtime::NDArray CSRGetData(CSRMatrix, runtime::NDArray rows, runtime::NDArray cols); +runtime::NDArray CSRGetData( + CSRMatrix, runtime::NDArray rows, runtime::NDArray cols); -/*! - * @brief Get the data for each (row, col) pair, then index into the weights array. +/** + * @brief Get the data for each (row, col) pair, then index into the weights + * array. * - * The operator supports matrix with duplicate entries but only one matched entry - * will be returned for each (row, col) pair. Support duplicate input (row, col) - * pairs. + * The operator supports matrix with duplicate entries but only one matched + * entry will be returned for each (row, col) pair. Support duplicate input + * (row, col) pairs. * - * If some (row, col) pairs do not contain a valid non-zero elements to index into the - * weights array, DGL returns the value \a filler for that pair instead. + * If some (row, col) pairs do not contain a valid non-zero elements to index + * into the weights array, DGL returns the value \a filler for that pair + * instead. * - * @note This operator allows broadcasting (i.e, either row or col can be of length 1). + * @note This operator allows broadcasting (i.e, either row or col can be of + * length 1). * * @tparam DType the data type of the weights array. * @param mat Sparse matrix. * @param rows Row index. * @param cols Column index. 
* @param weights The weights array. - * @param filler The value to return for row-column pairs not existent in the matrix. + * @param filler The value to return for row-column pairs not existent in the + * matrix. * @return Data array. The i^th element is the data of (rows[i], cols[i]) */ template runtime::NDArray CSRGetData( - CSRMatrix, runtime::NDArray rows, runtime::NDArray cols, runtime::NDArray weights, - DType filler); + CSRMatrix, runtime::NDArray rows, runtime::NDArray cols, + runtime::NDArray weights, DType filler); -/*! @brief Return a transposed CSR matrix */ +/** @brief Return a transposed CSR matrix */ CSRMatrix CSRTranspose(CSRMatrix csr); -/*! +/** * @brief Convert CSR matrix to COO matrix. * * Complexity: O(nnz) @@ -288,15 +299,15 @@ CSRMatrix CSRTranspose(CSRMatrix csr); * column sorted. * * @param csr Input csr matrix - * @param data_as_order If true, the data array in the input csr matrix contains the order - * by which the resulting COO tuples are stored. In this case, the - * data array of the resulting COO matrix will be empty because it - * is essentially a consecutive range. + * @param data_as_order If true, the data array in the input csr matrix contains + * the order by which the resulting COO tuples are stored. In this case, the + * data array of the resulting COO matrix will be empty + * because it is essentially a consecutive range. * @return a coo matrix */ COOMatrix CSRToCOO(CSRMatrix csr, bool data_as_order); -/*! +/** * @brief Slice rows of the given matrix and return. * * The sliced row IDs are relabeled to starting from zero. @@ -322,7 +333,7 @@ COOMatrix CSRToCOO(CSRMatrix csr, bool data_as_order); CSRMatrix CSRSliceRows(CSRMatrix csr, int64_t start, int64_t end); CSRMatrix CSRSliceRows(CSRMatrix csr, runtime::NDArray rows); -/*! +/** * @brief Get the submatrix specified by the row and col ids. 
* * In numpy notation, given matrix M, row index array I, col index array J @@ -339,16 +350,17 @@ CSRMatrix CSRSliceRows(CSRMatrix csr, runtime::NDArray rows); * @param cols The col index to select * @return submatrix */ -CSRMatrix CSRSliceMatrix(CSRMatrix csr, runtime::NDArray rows, runtime::NDArray cols); +CSRMatrix CSRSliceMatrix( + CSRMatrix csr, runtime::NDArray rows, runtime::NDArray cols); -/*! @return True if the matrix has duplicate entries */ +/** @return True if the matrix has duplicate entries */ bool CSRHasDuplicate(CSRMatrix csr); -/*! +/** * @brief Sort the column index at each row in ascending order in-place. * - * Only the indices and data arrays (if available) will be mutated. The indptr array - * stays the same. + * Only the indices and data arrays (if available) will be mutated. The indptr + * array stays the same. * * Examples: * num_rows = 4 @@ -363,39 +375,39 @@ bool CSRHasDuplicate(CSRMatrix csr); */ void CSRSort_(CSRMatrix* csr); -/*! +/** * @brief Sort the column index at each row in ascending order. * * Return a new CSR matrix with sorted column indices and data arrays. */ inline CSRMatrix CSRSort(CSRMatrix csr) { - if (csr.sorted) - return csr; - CSRMatrix ret(csr.num_rows, csr.num_cols, - csr.indptr, csr.indices.Clone(), - CSRHasData(csr)? csr.data.Clone() : csr.data, - csr.sorted); + if (csr.sorted) return csr; + CSRMatrix ret( + csr.num_rows, csr.num_cols, csr.indptr, csr.indices.Clone(), + CSRHasData(csr) ? csr.data.Clone() : csr.data, csr.sorted); CSRSort_(&ret); return ret; } -/*! +/** * @brief Reorder the rows and colmns according to the new row and column order. * @param csr The input csr matrix. * @param new_row_ids the new row Ids (the index is the old row Id) * @param new_col_ids the new column Ids (the index is the old col Id). 
*/ -CSRMatrix CSRReorder(CSRMatrix csr, runtime::NDArray new_row_ids, runtime::NDArray new_col_ids); +CSRMatrix CSRReorder( + CSRMatrix csr, runtime::NDArray new_row_ids, runtime::NDArray new_col_ids); -/*! +/** * @brief Remove entries from CSR matrix by entry indices (data indices) - * @return A new CSR matrix as well as a mapping from the new CSR entries to the old CSR - * entries. + * @return A new CSR matrix as well as a mapping from the new CSR entries to the + * old CSR entries. */ CSRMatrix CSRRemove(CSRMatrix csr, IdArray entries); -/*! - * @brief Randomly select a fixed number of non-zero entries along each given row independently. +/** + * @brief Randomly select a fixed number of non-zero entries along each given + * row independently. * * The function performs random choices along each row independently. * The picked indices are returned in the form of a COO matrix. @@ -431,13 +443,10 @@ CSRMatrix CSRRemove(CSRMatrix csr, IdArray entries); * @note The edges of the entire graph must be ordered by their edge types. */ COOMatrix CSRRowWiseSampling( - CSRMatrix mat, - IdArray rows, - int64_t num_samples, - NDArray prob_or_mask = NDArray(), - bool replace = true); + CSRMatrix mat, IdArray rows, int64_t num_samples, + NDArray prob_or_mask = NDArray(), bool replace = true); -/*! +/** * @brief Randomly select a fixed number of non-zero entries for each edge type * along each given row independently. * @@ -460,8 +469,8 @@ COOMatrix CSRRowWiseSampling( * CSRMatrix csr = ...; * IdArray rows = ... ; // [0, 3] * std::vector num_samples = {2, 2, 2}; - * COOMatrix sampled = CSRRowWisePerEtypeSampling(csr, rows, eid2etype_offset, num_samples, - * FloatArray(), false); + * COOMatrix sampled = CSRRowWisePerEtypeSampling(csr, rows, eid2etype_offset, + * num_samples, FloatArray(), false); * // possible sampled coo matrix: * // sampled.num_rows = 4 * // sampled.num_cols = 4 @@ -477,21 +486,20 @@ COOMatrix CSRRowWiseSampling( * Should be of the same length as the data array. 
* If an empty array is provided, assume uniform. * @param replace True if sample with replacement - * @param rowwise_etype_sorted whether the CSR column indices per row are ordered by edge type. + * @param rowwise_etype_sorted whether the CSR column indices per row are + * ordered by edge type. * @return A COOMatrix storing the picked row, col and data indices. * @note The edges must be ordered by their edge types. */ COOMatrix CSRRowWisePerEtypeSampling( - CSRMatrix mat, - IdArray rows, - const std::vector& eid2etype_offset, + CSRMatrix mat, IdArray rows, const std::vector& eid2etype_offset, const std::vector& num_samples, - const std::vector& prob_or_mask, - bool replace = true, + const std::vector& prob_or_mask, bool replace = true, bool rowwise_etype_sorted = false); -/*! - * @brief Select K non-zero entries with the largest weights along each given row. +/** + * @brief Select K non-zero entries with the largest weights along each given + * row. * * The function performs top-k selection along each row independently. * The picked indices are returned in the form of a COO matrix. @@ -520,35 +528,32 @@ COOMatrix CSRRowWisePerEtypeSampling( * @param mat Input CSR matrix. * @param rows Rows to sample from. * @param k The K value. - * @param weight Weight associated with each entry. Should be of the same length as the - * data array. If an empty array is provided, assume uniform. - * @param ascending If true, elements are sorted by ascending order, equivalent to find - * the K smallest values. Otherwise, find K largest values. - * @return A COOMatrix storing the picked row and col indices. Its data field stores the - * the index of the picked elements in the value array. + * @param weight Weight associated with each entry. Should be of the same length + * as the data array. If an empty array is provided, assume uniform. + * @param ascending If true, elements are sorted by ascending order, equivalent + * to find the K smallest values. Otherwise, find K largest values. 
+ * @return A COOMatrix storing the picked row and col indices. Its data field + * stores the the index of the picked elements in the value array. */ COOMatrix CSRRowWiseTopk( - CSRMatrix mat, - IdArray rows, - int64_t k, - FloatArray weight, + CSRMatrix mat, IdArray rows, int64_t k, FloatArray weight, bool ascending = false); - - -/*! - * @brief Randomly select a fixed number of non-zero entries along each given row independently, - * where the probability of columns to be picked can be biased according to its tag. +/** + * @brief Randomly select a fixed number of non-zero entries along each given + * row independently, where the probability of columns to be picked can be + * biased according to its tag. * - * Each column is assigned an integer tag which determines its probability to be sampled. - * Users can assign different probability to different tags. + * Each column is assigned an integer tag which determines its probability to be + * sampled. Users can assign different probability to different tags. * - * This function only works with a CSR matrix sorted according to the tag so that entries with - * the same column tag are arranged in a consecutive range, and the input `tag_offset` represents - * the boundaries of these ranges. However, the function itself will not check if the input matrix - * has been sorted. It's the caller's responsibility to ensure the input matrix has been sorted - * by `CSRSortByTag` (it will also return a NDArray `tag_offset` which should be used as an input - * of this function). + * This function only works with a CSR matrix sorted according to the tag so + * that entries with the same column tag are arranged in a consecutive range, + * and the input `tag_offset` represents the boundaries of these ranges. + * However, the function itself will not check if the input matrix has been + * sorted. 
It's the caller's responsibility to ensure the input matrix has been + * sorted by `CSRSortByTag` (it will also return a NDArray `tag_offset` which + * should be used as an input of this function). * * The picked indices are returned in the form of a COO matrix. * @@ -576,53 +581,48 @@ COOMatrix CSRRowWiseTopk( * // sampled.rows = [0, 1] * // sampled.cols = [1, 2] * // sampled.data = [2, 0] - * // Note that in this case, for row 1, the column 3 will never be picked as it has tag 1 and the + * // Note that in this case, for row 1, the column 3 will never be picked as it + * has tag 1 and the * // probability of tag 1 is 0. * * * @param mat Input CSR matrix. * @param rows Rows to sample from. * @param num_samples Number of samples. - * @param tag_offset The boundaries of tags. Should be of the shape [num_row, num_tags+1] + * @param tag_offset The boundaries of tags. Should be of the shape [num_row, + * num_tags+1] * @param bias Unnormalized probability array. Should be of length num_tags * @param replace True if sample with replacement - * @return A COOMatrix storing the picked row and col indices. Its data field stores the - * the index of the picked elements in the value array. + * @return A COOMatrix storing the picked row and col indices. Its data field + * stores the the index of the picked elements in the value array. * */ COOMatrix CSRRowWiseSamplingBiased( - CSRMatrix mat, - IdArray rows, - int64_t num_samples, - NDArray tag_offset, - FloatArray bias, - bool replace = true -); + CSRMatrix mat, IdArray rows, int64_t num_samples, NDArray tag_offset, + FloatArray bias, bool replace = true); -/*! - * @brief Uniformly sample row-column pairs whose entries do not exist in the given - * sparse matrix using rejection sampling. +/** + * @brief Uniformly sample row-column pairs whose entries do not exist in the + * given sparse matrix using rejection sampling. * - * @note The number of samples returned may not necessarily be the number of samples - * given. 
+ * @note The number of samples returned may not necessarily be the number of + * samples given. * * @param csr The CSR matrix. * @param num_samples The number of samples. * @param num_trials The number of trials. - * @param exclude_self_loops Do not include the examples where the row equals the column. + * @param exclude_self_loops Do not include the examples where the row equals + * the column. * @param replace Whether to sample with replacement. - * @param redundancy How much redundant negative examples to take in case of duplicate examples. + * @param redundancy How much redundant negative examples to take in case of + * duplicate examples. * @return A pair of row and column tensors. */ std::pair CSRGlobalUniformNegativeSampling( - const CSRMatrix& csr, - int64_t num_samples, - int num_trials, - bool exclude_self_loops, - bool replace, - double redundancy); - -/*! + const CSRMatrix& csr, int64_t num_samples, int num_trials, + bool exclude_self_loops, bool replace, double redundancy); + +/** * @brief Sort the column index according to the tag of each column. * * Example: @@ -647,14 +647,13 @@ std::pair CSRGlobalUniformNegativeSampling( * @param tag_array Tag of each column. IdArray with length num_cols * @param num_tags Number of tags. It should be equal to max(tag_array)+1. * @return 1. A sorted copy of the given CSR matrix - * 2. The split positions of different tags. NDArray of shape (num_rows, num_tags + 1) + * 2. The split positions of different tags. NDArray of shape (num_rows, + * num_tags + 1) */ std::pair CSRSortByTag( - const CSRMatrix &csr, - const IdArray tag_array, - int64_t num_tags); + const CSRMatrix& csr, const IdArray tag_array, int64_t num_tags); -/* +/** * @brief Union two CSRMatrix into one CSRMatrix. * * Two Matrix must have the same shape. @@ -683,10 +682,9 @@ std::pair CSRSortByTag( * CSRMatrix_C.num_rows : 3 * CSRMatrix_C.num_cols : 4 */ -CSRMatrix UnionCsr( - const std::vector& csrs); +CSRMatrix UnionCsr(const std::vector& csrs); -/*! 
+/** * @brief Union a list CSRMatrix into one CSRMatrix. * * Examples: @@ -714,14 +712,15 @@ CSRMatrix UnionCsr( * CSRMatrix_C.num_cols : 5 * * @param csrs The input list of csr matrix. - * @param src_offset A list of integers recording src vertix id offset of each Matrix in csrs - * @param src_offset A list of integers recording dst vertix id offset of each Matrix in csrs + * @param src_offset A list of integers recording src vertix id offset of each + * Matrix in csrs + * @param src_offset A list of integers recording dst vertix id offset of each + * Matrix in csrs * @return The combined CSRMatrix. */ -CSRMatrix DisjointUnionCsr( - const std::vector& csrs); +CSRMatrix DisjointUnionCsr(const std::vector& csrs); -/*! +/** * @brief CSRMatrix toSimple. * * A = [[0, 0, 0], @@ -739,13 +738,13 @@ CSRMatrix DisjointUnionCsr( * edge_map = [0, 0, 0, 1, 1, 2, 3, 4, 4, 4, 4] * * @return The simplified CSRMatrix - * The count recording the number of duplicated edges from the original graph. - * The edge mapping from the edge IDs of original graph to those of the + * The count recording the number of duplicated edges from the original + * graph. The edge mapping from the edge IDs of original graph to those of the * returned graph. */ std::tuple CSRToSimple(const CSRMatrix& csr); -/*! +/** * @brief Split a CSRMatrix into multiple disjoint components. * * Examples: @@ -790,13 +789,12 @@ std::tuple CSRToSimple(const CSRMatrix& csr); * @return A list of CSRMatrixes representing each disjoint components. */ std::vector DisjointPartitionCsrBySizes( - const CSRMatrix &csrs, - const uint64_t batch_size, - const std::vector &edge_cumsum, - const std::vector &src_vertex_cumsum, - const std::vector &dst_vertex_cumsum); + const CSRMatrix& csrs, const uint64_t batch_size, + const std::vector& edge_cumsum, + const std::vector& src_vertex_cumsum, + const std::vector& dst_vertex_cumsum); -/*! 
+/** * @brief Slice a contiguous chunk from a CSRMatrix * * Examples: @@ -832,10 +830,9 @@ std::vector DisjointPartitionCsrBySizes( * @return CSRMatrix representing the chunk. */ CSRMatrix CSRSliceContiguousChunk( - const CSRMatrix &csr, - const std::vector &edge_range, - const std::vector &src_vertex_range, - const std::vector &dst_vertex_range); + const CSRMatrix& csr, const std::vector& edge_range, + const std::vector& src_vertex_range, + const std::vector& dst_vertex_range); } // namespace aten } // namespace dgl diff --git a/include/dgl/aten/macro.h b/include/dgl/aten/macro.h index 9e1d92615a5c..fb9fe45ff666 100644 --- a/include/dgl/aten/macro.h +++ b/include/dgl/aten/macro.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file dgl/aten/macro.h * @brief Common macros for aten package. @@ -9,7 +9,7 @@ ///////////////////////// Dispatchers ////////////////////////// -/* +/** * Dispatch according to device: * * ATEN_XPU_SWITCH(array->ctx.device_type, XPU, { @@ -28,7 +28,7 @@ } \ } while (0) -/* +/** * Dispatch according to device: * * XXX(minjie): temporary macro that allows CUDA operator @@ -59,7 +59,7 @@ #define ATEN_XPU_SWITCH_CUDA ATEN_XPU_SWITCH #endif // DGL_USE_CUDA -/* +/** * Dispatch according to integral type (either int32 or int64): * * ATEN_ID_TYPE_SWITCH(array->dtype, IdType, { @@ -81,7 +81,7 @@ } \ } while (0) -/* +/** * Dispatch according to bits (either int32 or int64): * * ATEN_ID_BITS_SWITCH(bits, IdType, { @@ -104,7 +104,7 @@ } \ } while (0) -/* +/** * Dispatch according to float type (either float32 or float64): * * ATEN_FLOAT_TYPE_SWITCH(array->dtype, FloatType, { @@ -128,7 +128,7 @@ } \ } while (0) -/* +/** * Dispatch according to float type, including 16bits (float16/bfloat16/float32/float64). 
*/ #ifdef DGL_USE_CUDA @@ -185,7 +185,7 @@ ATEN_FLOAT_TYPE_SWITCH(val, FloatType, val_name, {__VA_ARGS__}) #endif // DGL_USE_CUDA -/* +/** * Dispatch according to data type (int32, int64, float32 or float64): * * ATEN_DTYPE_SWITCH(array->dtype, DType, { @@ -212,7 +212,7 @@ } \ } while (0) -/* +/** * Dispatch according to data type (int8, uint8, float32 or float64): * * ATEN_FLOAT_INT8_UINT8_TYPE_SWITCH(array->dtype, DType, { @@ -239,7 +239,7 @@ } \ } while (0) -/* +/** * Dispatch data type only based on bit-width (8-bit, 16-bit, 32-bit, 64-bit): * * ATEN_DTYPE_BITS_ONLY_SWITCH(array->dtype, DType, { @@ -268,7 +268,7 @@ } \ } while (0) -/* +/** * Dispatch according to integral type of CSR graphs. * Identical to ATEN_ID_TYPE_SWITCH except for a different error message. */ @@ -306,7 +306,7 @@ << "context as " << (#VAR1) << "(" << (VAR1)->ctx << "). " \ << "Or " << (#VAR1) << "(" << (VAR1)->ctx << ")" << " is pinned"; -/* +/** * Macro to dispatch according to the context of array and dtype of csr * to enable CUDA UVA ops. * Context check is covered here to avoid confusion with CHECK_SAME_CONTEXT. diff --git a/include/dgl/aten/spmat.h b/include/dgl/aten/spmat.h index 1caf4c4f0bca..a11618dc5942 100644 --- a/include/dgl/aten/spmat.h +++ b/include/dgl/aten/spmat.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file dgl/aten/spmat.h * @brief Sparse matrix definitions @@ -14,7 +14,7 @@ namespace dgl { -/*! +/** * @brief Sparse format. */ enum class SparseFormat { @@ -23,7 +23,7 @@ enum class SparseFormat { kCSC = 3, }; -/*! +/** * @brief Sparse format codes */ const dgl_format_code_t ALL_CODE = 0x7; diff --git a/include/dgl/aten/types.h b/include/dgl/aten/types.h index ba5d79f7d250..a2dd7dcdaf88 100644 --- a/include/dgl/aten/types.h +++ b/include/dgl/aten/types.h @@ -1,4 +1,4 @@ -/*! 
+/** * Copyright (c) 2020 by Contributors * @file dgl/aten/types.h * @brief Array and ID types @@ -14,7 +14,7 @@ namespace dgl { typedef uint64_t dgl_id_t; typedef uint64_t dgl_type_t; -/*! @brief Type for dgl fomrat code, whose binary representation indices +/** @brief Type for dgl fomrat code, whose binary representation indices * which sparse format is in use and which is not. * * Suppose the binary representation is xyz, then diff --git a/include/dgl/base_heterograph.h b/include/dgl/base_heterograph.h index d461a389a121..5d58d811a1b7 100644 --- a/include/dgl/base_heterograph.h +++ b/include/dgl/base_heterograph.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file dgl/heterograph_interface.h * @brief DGL heterogeneous graph index class. @@ -31,13 +31,13 @@ typedef std::shared_ptr FlattenedHeteroGraphPtr; struct HeteroSubgraph; -/*! @brief Enum class for edge direction */ +/** @brief Enum class for edge direction */ enum class EdgeDir { kIn, // in edge direction kOut // out edge direction }; -/*! +/** * @brief Base heterogenous graph. * * In heterograph, nodes represent entities and edges represent relations. @@ -58,22 +58,22 @@ class BaseHeteroGraph : public runtime::Object { ////////////////////// query/operations on meta graph /////////////////////// - /*! @return the number of vertex types */ + /** @return the number of vertex types */ virtual uint64_t NumVertexTypes() const { return meta_graph_->NumVertices(); } - /*! @return the number of edge types */ + /** @return the number of edge types */ virtual uint64_t NumEdgeTypes() const { return meta_graph_->NumEdges(); } - /*! @return given the edge type, find the source type */ + /** @return given the edge type, find the source type */ virtual std::pair GetEndpointTypes( dgl_type_t etype) const { return meta_graph_->FindEdge(etype); } - /*! @return the meta graph */ + /** @return the meta graph */ virtual GraphPtr meta_graph() const { return meta_graph_; } - /*! 
+ /** * @brief Return the bipartite graph of the given edge type. * @param etype The edge type. * @return The bipartite graph. @@ -82,90 +82,90 @@ class BaseHeteroGraph : public runtime::Object { ///////////////////// query/operations on realized graph ///////////////////// - /*! @brief Add vertices to the given vertex type */ + /** @brief Add vertices to the given vertex type */ virtual void AddVertices(dgl_type_t vtype, uint64_t num_vertices) = 0; - /*! @brief Add one edge to the given edge type */ + /** @brief Add one edge to the given edge type */ virtual void AddEdge(dgl_type_t etype, dgl_id_t src, dgl_id_t dst) = 0; - /*! @brief Add edges to the given edge type */ + /** @brief Add edges to the given edge type */ virtual void AddEdges(dgl_type_t etype, IdArray src_ids, IdArray dst_ids) = 0; - /*! + /** * @brief Clear the graph. Remove all vertices/edges. */ virtual void Clear() = 0; - /*! + /** * @brief Get the data type of node and edge IDs of this graph. */ virtual DGLDataType DataType() const = 0; - /*! + /** * @brief Get the device context of this graph. */ virtual DGLContext Context() const = 0; - /*! + /** * @brief Pin graph. */ virtual void PinMemory_() = 0; - /*! + /** * @brief Check if this graph is pinned. */ virtual bool IsPinned() const = 0; - /*! + /** * @brief Record stream for this graph. * @param stream The stream that is using the graph */ virtual void RecordStream(DGLStreamHandle stream) = 0; - /*! + /** * @brief Get the number of integer bits used to store node/edge ids (32 or * 64). */ // TODO(BarclayII) replace NumBits() calls to DataType() calls virtual uint8_t NumBits() const = 0; - /*! + /** * @return whether the graph is a multigraph */ virtual bool IsMultigraph() const = 0; - /*! @return whether the graph is read-only */ + /** @return whether the graph is read-only */ virtual bool IsReadonly() const = 0; - /*! 
@return the number of vertices in the graph.*/ + /** @return the number of vertices in the graph.*/ virtual uint64_t NumVertices(dgl_type_t vtype) const = 0; - /*! @return the number of vertices for each type in the graph as a vector */ + /** @return the number of vertices for each type in the graph as a vector */ inline virtual std::vector NumVerticesPerType() const { LOG(FATAL) << "[BUG] NumVerticesPerType() not supported on this object."; return {}; } - /*! @return the number of edges in the graph.*/ + /** @return the number of edges in the graph.*/ virtual uint64_t NumEdges(dgl_type_t etype) const = 0; - /*! @return true if the given vertex is in the graph.*/ + /** @return true if the given vertex is in the graph.*/ virtual bool HasVertex(dgl_type_t vtype, dgl_id_t vid) const = 0; - /*! @return a 0-1 array indicating whether the given vertices are in the + /** @return a 0-1 array indicating whether the given vertices are in the * graph. */ virtual BoolArray HasVertices(dgl_type_t vtype, IdArray vids) const = 0; - /*! @return true if the given edge is in the graph.*/ + /** @return true if the given edge is in the graph.*/ virtual bool HasEdgeBetween( dgl_type_t etype, dgl_id_t src, dgl_id_t dst) const = 0; - /*! @return a 0-1 array indicating whether the given edges are in the graph.*/ + /** @return a 0-1 array indicating whether the given edges are in the graph.*/ virtual BoolArray HasEdgesBetween( dgl_type_t etype, IdArray src_ids, IdArray dst_ids) const = 0; - /*! + /** * @brief Find the predecessors of a vertex. * @note The given vertex should belong to the source vertex type * of the given edge type. @@ -175,7 +175,7 @@ class BaseHeteroGraph : public runtime::Object { */ virtual IdArray Predecessors(dgl_type_t etype, dgl_id_t dst) const = 0; - /*! + /** * @brief Find the successors of a vertex. * @note The given vertex should belong to the dest vertex type * of the given edge type. 
@@ -185,7 +185,7 @@ class BaseHeteroGraph : public runtime::Object { */ virtual IdArray Successors(dgl_type_t etype, dgl_id_t src) const = 0; - /*! + /** * @brief Get all edge ids between the two given endpoints * @note The given src and dst vertices should belong to the source vertex * type and the dest vertex type of the given edge type, respectively. \param @@ -195,7 +195,7 @@ class BaseHeteroGraph : public runtime::Object { virtual IdArray EdgeId( dgl_type_t etype, dgl_id_t src, dgl_id_t dst) const = 0; - /*! + /** * @brief Get all edge ids between the given endpoint pairs. * * @param etype The edge type @@ -206,7 +206,7 @@ class BaseHeteroGraph : public runtime::Object { virtual EdgeArray EdgeIdsAll( dgl_type_t etype, IdArray src, IdArray dst) const = 0; - /*! + /** * @brief Get edge ids between the given endpoint pairs. * * Only find one matched edge Ids even if there are multiple matches due to @@ -221,7 +221,7 @@ class BaseHeteroGraph : public runtime::Object { virtual IdArray EdgeIdsOne( dgl_type_t etype, IdArray src, IdArray dst) const = 0; - /*! + /** * @brief Find the edge ID and return the pair of endpoints * @param etype The edge type * @param eid The edge ID @@ -231,7 +231,7 @@ class BaseHeteroGraph : public runtime::Object { virtual std::pair FindEdge( dgl_type_t etype, dgl_id_t eid) const = 0; - /*! + /** * @brief Find the edge IDs and return their source and target node IDs. * @param etype The edge type * @param eids The edge ID array. @@ -240,7 +240,7 @@ class BaseHeteroGraph : public runtime::Object { */ virtual EdgeArray FindEdges(dgl_type_t etype, IdArray eids) const = 0; - /*! + /** * @brief Get the in edges of the vertex. * @note The given vertex should belong to the dest vertex type * of the given edge type. @@ -250,7 +250,7 @@ class BaseHeteroGraph : public runtime::Object { */ virtual EdgeArray InEdges(dgl_type_t etype, dgl_id_t vid) const = 0; - /*! + /** * @brief Get the in edges of the vertices. 
* @note The given vertex should belong to the dest vertex type * of the given edge type. @@ -260,7 +260,7 @@ class BaseHeteroGraph : public runtime::Object { */ virtual EdgeArray InEdges(dgl_type_t etype, IdArray vids) const = 0; - /*! + /** * @brief Get the out edges of the vertex. * @note The given vertex should belong to the source vertex type * of the given edge type. @@ -270,7 +270,7 @@ class BaseHeteroGraph : public runtime::Object { */ virtual EdgeArray OutEdges(dgl_type_t etype, dgl_id_t vid) const = 0; - /*! + /** * @brief Get the out edges of the vertices. * @note The given vertex should belong to the source vertex type * of the given edge type. @@ -280,7 +280,7 @@ class BaseHeteroGraph : public runtime::Object { */ virtual EdgeArray OutEdges(dgl_type_t etype, IdArray vids) const = 0; - /*! + /** * @brief Get all the edges in the graph. * @note If order is "srcdst", the returned edges list is sorted by their src * and dst ids. If order is "eid", they are in their edge id order. Otherwise, @@ -291,7 +291,7 @@ class BaseHeteroGraph : public runtime::Object { virtual EdgeArray Edges( dgl_type_t etype, const std::string& order = "") const = 0; - /*! + /** * @brief Get the in degree of the given vertex. * @note The given vertex should belong to the dest vertex type * of the given edge type. @@ -301,7 +301,7 @@ class BaseHeteroGraph : public runtime::Object { */ virtual uint64_t InDegree(dgl_type_t etype, dgl_id_t vid) const = 0; - /*! + /** * @brief Get the in degrees of the given vertices. * @note The given vertex should belong to the dest vertex type * of the given edge type. @@ -311,7 +311,7 @@ class BaseHeteroGraph : public runtime::Object { */ virtual DegreeArray InDegrees(dgl_type_t etype, IdArray vids) const = 0; - /*! + /** * @brief Get the out degree of the given vertex. * @note The given vertex should belong to the source vertex type * of the given edge type. 
@@ -321,7 +321,7 @@ class BaseHeteroGraph : public runtime::Object { */ virtual uint64_t OutDegree(dgl_type_t etype, dgl_id_t vid) const = 0; - /*! + /** * @brief Get the out degrees of the given vertices. * @note The given vertex should belong to the source vertex type * of the given edge type. @@ -331,7 +331,7 @@ class BaseHeteroGraph : public runtime::Object { */ virtual DegreeArray OutDegrees(dgl_type_t etype, IdArray vids) const = 0; - /*! + /** * @brief Return the successor vector * @note The given vertex should belong to the source vertex type * of the given edge type. @@ -340,7 +340,7 @@ class BaseHeteroGraph : public runtime::Object { */ virtual DGLIdIters SuccVec(dgl_type_t etype, dgl_id_t vid) const = 0; - /*! + /** * @brief Return the out edge id vector * @note The given vertex should belong to the source vertex type * of the given edge type. @@ -349,7 +349,7 @@ class BaseHeteroGraph : public runtime::Object { */ virtual DGLIdIters OutEdgeVec(dgl_type_t etype, dgl_id_t vid) const = 0; - /*! + /** * @brief Return the predecessor vector * @note The given vertex should belong to the dest vertex type * of the given edge type. @@ -358,7 +358,7 @@ class BaseHeteroGraph : public runtime::Object { */ virtual DGLIdIters PredVec(dgl_type_t etype, dgl_id_t vid) const = 0; - /*! + /** * @brief Return the in edge id vector * @note The given vertex should belong to the dest vertex type * of the given edge type. @@ -367,7 +367,7 @@ class BaseHeteroGraph : public runtime::Object { */ virtual DGLIdIters InEdgeVec(dgl_type_t etype, dgl_id_t vid) const = 0; - /*! + /** * @brief Get the adjacency matrix of the graph. * * TODO(minjie): deprecate this interface; replace it with GetXXXMatrix. @@ -388,7 +388,7 @@ class BaseHeteroGraph : public runtime::Object { virtual std::vector GetAdj( dgl_type_t etype, bool transpose, const std::string& fmt) const = 0; - /*! + /** * @brief Determine which format to use with a preference. 
* @@ -402,35 +402,35 @@ class BaseHeteroGraph : public runtime::Object { virtual SparseFormat SelectFormat( dgl_type_t etype, dgl_format_code_t preferred_formats) const = 0; - /*! + /** * @brief Return sparse formats already created for the graph. * * @return a number of type dgl_format_code_t. */ virtual dgl_format_code_t GetCreatedFormats() const = 0; - /*! + /** * @brief Return allowed sparse formats for the graph. * * @return a number of type dgl_format_code_t. */ virtual dgl_format_code_t GetAllowedFormats() const = 0; - /*! + /** * @brief Return the graph in specified available formats. * * @return The new graph. */ virtual HeteroGraphPtr GetGraphInFormat(dgl_format_code_t formats) const = 0; - /*! + /** * @brief Get adjacency matrix in COO format. * @param etype Edge type. * @return COO matrix. */ virtual aten::COOMatrix GetCOOMatrix(dgl_type_t etype) const = 0; - /*! + /** * @brief Get adjacency matrix in CSR format. * * The row and column sizes are equal to the number of dsttype and srctype @@ -441,7 +441,7 @@ class BaseHeteroGraph : public runtime::Object { */ virtual aten::CSRMatrix GetCSRMatrix(dgl_type_t etype) const = 0; - /*! + /** * @brief Get adjacency matrix in CSC format. * * A CSC matrix is equivalent to the transpose of a CSR matrix. @@ -453,7 +453,7 @@ class BaseHeteroGraph : public runtime::Object { */ virtual aten::CSRMatrix GetCSCMatrix(dgl_type_t etype) const = 0; - /*! + /** * @brief Extract the induced subgraph by the given vertices. * * The length of the given vector should be equal to the number of vertex @@ -467,7 +467,7 @@ class BaseHeteroGraph : public runtime::Object { virtual HeteroSubgraph VertexSubgraph( const std::vector& vids) const = 0; - /*! + /** * @brief Extract the induced subgraph by the given edges. * * The length of the given vector should be equal to the number of edge types. 
@@ -482,7 +482,7 @@ class BaseHeteroGraph : public runtime::Object { virtual HeteroSubgraph EdgeSubgraph( const std::vector& eids, bool preserve_nodes = false) const = 0; - /*! + /** * @brief Convert the list of requested unitgraph graphs into a single * unitgraph graph. * @@ -496,7 +496,7 @@ class BaseHeteroGraph : public runtime::Object { return nullptr; } - /*! @brief Cast this graph to immutable graph */ + /** @brief Cast this graph to immutable graph */ virtual GraphPtr AsImmutableGraph() const { LOG(FATAL) << "AsImmutableGraph not supported."; return nullptr; @@ -506,7 +506,7 @@ class BaseHeteroGraph : public runtime::Object { DGL_DECLARE_OBJECT_TYPE_INFO(BaseHeteroGraph, runtime::Object); protected: - /*! @brief meta graph */ + /** @brief meta graph */ GraphPtr meta_graph_; // empty constructor @@ -516,7 +516,7 @@ class BaseHeteroGraph : public runtime::Object { // Define HeteroGraphRef DGL_DEFINE_OBJECT_REF(HeteroGraphRef, BaseHeteroGraph); -/*! +/** * @brief Hetero-subgraph data structure. * * This class can be used as arguments and return values of a C API. @@ -531,16 +531,16 @@ DGL_DEFINE_OBJECT_REF(HeteroGraphRef, BaseHeteroGraph); * */ struct HeteroSubgraph : public runtime::Object { - /*! @brief The heterograph. */ + /** @brief The heterograph. */ HeteroGraphPtr graph; - /*! + /** * @brief The induced vertex ids of each entity type. * The vector length is equal to the number of vertex types in the parent * graph. Each array i has the same length as the number of vertices in type * i. Empty array is allowed if the mapping is identity. */ std::vector induced_vertices; - /*! + /** * @brief The induced edge ids of each relation type. * The vector length is equal to the number of edge types in the parent graph. * Each array i has the same length as the number of edges in type i. @@ -555,46 +555,46 @@ struct HeteroSubgraph : public runtime::Object { // Define HeteroSubgraphRef DGL_DEFINE_OBJECT_REF(HeteroSubgraphRef, HeteroSubgraph); -/*! 
@brief The flattened heterograph */ +/** @brief The flattened heterograph */ struct FlattenedHeteroGraph : public runtime::Object { - /*! @brief The graph */ + /** @brief The graph */ HeteroGraphRef graph; - /*! + /** * @brief Mapping from source node ID to node type in parent graph * @note The induced type array guarantees that the same type always appear * contiguously. */ IdArray induced_srctype; - /*! + /** * @brief The set of node types in parent graph appearing in source nodes. */ IdArray induced_srctype_set; - /*! @brief Mapping from source node ID to local node ID in parent graph */ + /** @brief Mapping from source node ID to local node ID in parent graph */ IdArray induced_srcid; - /*! + /** * @brief Mapping from edge ID to edge type in parent graph * @note The induced type array guarantees that the same type always appear * contiguously. */ IdArray induced_etype; - /*! + /** * @brief The set of edge types in parent graph appearing in edges. */ IdArray induced_etype_set; - /*! @brief Mapping from edge ID to local edge ID in parent graph */ + /** @brief Mapping from edge ID to local edge ID in parent graph */ IdArray induced_eid; - /*! + /** * @brief Mapping from destination node ID to node type in parent graph * @note The induced type array guarantees that the same type always appear * contiguously. */ IdArray induced_dsttype; - /*! + /** * @brief The set of node types in parent graph appearing in destination * nodes. */ IdArray induced_dsttype_set; - /*! @brief Mapping from destination node ID to local node ID in parent graph + /** @brief Mapping from destination node ID to local node ID in parent graph */ IdArray induced_dstid; @@ -618,7 +618,7 @@ DGL_DEFINE_OBJECT_REF(FlattenedHeteroGraphRef, FlattenedHeteroGraph); // Declarations of functions and algorithms -/*! +/** * @brief Create a heterograph from meta graph and a list of bipartite graph, * additionally specifying number of nodes per type. 
*/ @@ -626,7 +626,7 @@ HeteroGraphPtr CreateHeteroGraph( GraphPtr meta_graph, const std::vector& rel_graphs, const std::vector& num_nodes_per_type = {}); -/*! +/** * @brief Create a heterograph from COO input. * @param num_vtypes Number of vertex types. Must be 1 or 2. * @param num_src Number of nodes in the source type. @@ -645,7 +645,7 @@ HeteroGraphPtr CreateFromCOO( IdArray col, bool row_sorted = false, bool col_sorted = false, dgl_format_code_t formats = ALL_CODE); -/*! +/** * @brief Create a heterograph from COO input. * @param num_vtypes Number of vertex types. Must be 1 or 2. * @param mat The COO matrix @@ -656,7 +656,7 @@ HeteroGraphPtr CreateFromCOO( int64_t num_vtypes, const aten::COOMatrix& mat, dgl_format_code_t formats = ALL_CODE); -/*! +/** * @brief Create a heterograph from CSR input. * @param num_vtypes Number of vertex types. Must be 1 or 2. * @param num_src Number of nodes in the source type. @@ -671,7 +671,7 @@ HeteroGraphPtr CreateFromCSR( int64_t num_vtypes, int64_t num_src, int64_t num_dst, IdArray indptr, IdArray indices, IdArray edge_ids, dgl_format_code_t formats = ALL_CODE); -/*! +/** * @brief Create a heterograph from CSR input. * @param num_vtypes Number of vertex types. Must be 1 or 2. * @param mat The CSR matrix @@ -682,7 +682,7 @@ HeteroGraphPtr CreateFromCSR( int64_t num_vtypes, const aten::CSRMatrix& mat, dgl_format_code_t formats = ALL_CODE); -/*! +/** * @brief Create a heterograph from CSC input. * @param num_vtypes Number of vertex types. Must be 1 or 2. * @param num_src Number of nodes in the source type. @@ -697,7 +697,7 @@ HeteroGraphPtr CreateFromCSC( int64_t num_vtypes, int64_t num_src, int64_t num_dst, IdArray indptr, IdArray indices, IdArray edge_ids, dgl_format_code_t formats = ALL_CODE); -/*! +/** * @brief Create a heterograph from CSC input. * @param num_vtypes Number of vertex types. Must be 1 or 2. 
* @param mat The CSC matrix @@ -708,7 +708,7 @@ HeteroGraphPtr CreateFromCSC( int64_t num_vtypes, const aten::CSRMatrix& mat, dgl_format_code_t formats = ALL_CODE); -/*! +/** * @brief Extract the subgraph of the in edges of the given nodes. * @param graph Graph * @param nodes Node IDs of each type @@ -720,7 +720,7 @@ HeteroSubgraph InEdgeGraph( const HeteroGraphPtr graph, const std::vector& nodes, bool relabel_nodes = false); -/*! +/** * @brief Extract the subgraph of the out edges of the given nodes. * @param graph Graph * @param nodes Node IDs of each type @@ -732,7 +732,7 @@ HeteroSubgraph OutEdgeGraph( const HeteroGraphPtr graph, const std::vector& nodes, bool relabel_nodes = false); -/*! +/** * @brief Joint union multiple graphs into one graph. * * All input graphs should have the same metagraph. @@ -746,7 +746,7 @@ HeteroSubgraph OutEdgeGraph( HeteroGraphPtr JointUnionHeteroGraph( GraphPtr meta_graph, const std::vector& component_graphs); -/*! +/** * @brief Union multiple graphs into one with each input graph as one disjoint * component. * @@ -766,7 +766,7 @@ HeteroGraphPtr DisjointUnionHeteroGraph( HeteroGraphPtr DisjointUnionHeteroGraph2( GraphPtr meta_graph, const std::vector& component_graphs); -/*! +/** * @brief Slice a contiguous subgraph, e.g. retrieve a component graph from a * batched graph. * @@ -785,7 +785,7 @@ HeteroGraphPtr SliceHeteroGraph( IdArray num_nodes_per_type, IdArray start_nid_per_type, IdArray num_edges_per_type, IdArray start_eid_per_type); -/*! +/** * @brief Split a graph into multiple disjoin components. * * Edges across different components are ignored. All the result graphs have the @@ -814,7 +814,7 @@ std::vector DisjointPartitionHeteroBySizes2( GraphPtr meta_graph, HeteroGraphPtr batched_graph, IdArray vertex_sizes, IdArray edge_sizes); -/*! +/** * @brief Structure for pickle/unpickle. 
* * The design principle is to leverage the NDArray class as much as possible so @@ -827,29 +827,29 @@ std::vector DisjointPartitionHeteroBySizes2( * This class can be used as arguments and return values of a C API. */ struct HeteroPickleStates : public runtime::Object { - /*! @brief version number */ + /** @brief version number */ int64_t version = 0; - /*! @brief Metainformation + /** @brief Metainformation * * metagraph, number of nodes per type, format, flags */ std::string meta; - /*! @brief Arrays representing graph structure (coo or csr) */ + /** @brief Arrays representing graph structure (coo or csr) */ std::vector arrays; /* To support backward compatibility, we have to retain fields in the old * version of HeteroPickleStates */ - /*! @brief Metagraph(64bits ImmutableGraph) */ + /** @brief Metagraph(64bits ImmutableGraph) */ GraphPtr metagraph; - /*! @brief Number of nodes per type */ + /** @brief Number of nodes per type */ std::vector num_nodes_per_type; - /*! @brief adjacency matrices of each relation graph */ + /** @brief adjacency matrices of each relation graph */ std::vector > adjs; static constexpr const char* _type_key = "graph.HeteroPickleStates"; @@ -859,7 +859,7 @@ struct HeteroPickleStates : public runtime::Object { // Define HeteroPickleStatesRef DGL_DEFINE_OBJECT_REF(HeteroPickleStatesRef, HeteroPickleStates); -/*! +/** * @brief Create a heterograph from pickling states. * * @param states Pickle states @@ -867,7 +867,7 @@ DGL_DEFINE_OBJECT_REF(HeteroPickleStatesRef, HeteroPickleStates); */ HeteroGraphPtr HeteroUnpickle(const HeteroPickleStates& states); -/*! +/** * @brief Get the pickling state of the relation graph structure in backend * tensors. * @@ -875,7 +875,7 @@ HeteroGraphPtr HeteroUnpickle(const HeteroPickleStates& states); */ HeteroPickleStates HeteroPickle(HeteroGraphPtr graph); -/*! 
+/** * @brief Old version of HeteroUnpickle, for backward compatibility * * @param states Pickle states @@ -883,7 +883,7 @@ HeteroPickleStates HeteroPickle(HeteroGraphPtr graph); */ HeteroGraphPtr HeteroUnpickleOld(const HeteroPickleStates& states); -/*! +/** * @brief Create heterograph from pickling states pickled by ForkingPickler. * * This is different from HeteroUnpickle where @@ -892,7 +892,7 @@ HeteroGraphPtr HeteroUnpickleOld(const HeteroPickleStates& states); */ HeteroGraphPtr HeteroForkingUnpickle(const HeteroPickleStates& states); -/*! +/** * @brief Get the pickling states of the relation graph structure in backend * tensors for ForkingPickler. * diff --git a/include/dgl/bcast.h b/include/dgl/bcast.h index 2c72d25b9a3d..679322d8580c 100644 --- a/include/dgl/bcast.h +++ b/include/dgl/bcast.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file dgl/aten/bcast.h * @brief Broadcast related function C++ header. @@ -14,11 +14,11 @@ using namespace dgl::runtime; namespace dgl { -/*! +/** * @brief Broadcast offsets and auxiliary information. */ struct BcastOff { - /*! + /** * @brief offset vector of lhs operand and rhs operand. * @note lhs_offset[i] indicates the start position of the scalar * in lhs operand that required to compute the i-th element @@ -36,9 +36,9 @@ struct BcastOff { * rhs array. */ std::vector lhs_offset, rhs_offset; - /*! @brief Whether broadcast is required or not. */ + /** @brief Whether broadcast is required or not. */ bool use_bcast; - /*! + /** * @brief Auxiliary information for kernel computation * @note lhs_len refers to the left hand side operand length. * e.g. 15 for shape (1, 3, 5) @@ -52,7 +52,7 @@ struct BcastOff { int64_t lhs_len, rhs_len, out_len, reduce_size; }; -/*! +/** * @brief: Compute broadcast and auxiliary information given operator * and operands for kernel computation. 
* @param op: a string indicates the operator, could be `add`, `sub`, diff --git a/include/dgl/graph.h b/include/dgl/graph.h index b4a52565012a..15295f2bb356 100644 --- a/include/dgl/graph.h +++ b/include/dgl/graph.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2018 by Contributors * @file dgl/graph.h * @brief DGL graph index class. @@ -21,20 +21,20 @@ class Graph; class GraphOp; typedef std::shared_ptr MutableGraphPtr; -/*! @brief Mutable graph based on adjacency list. */ +/** @brief Mutable graph based on adjacency list. */ class Graph : public GraphInterface { public: - /*! @brief default constructor */ + /** @brief default constructor */ Graph() {} - /*! @brief construct a graph from the coo format. */ + /** @brief construct a graph from the coo format. */ Graph(IdArray src_ids, IdArray dst_ids, size_t num_nodes); - /*! @brief default copy constructor */ + /** @brief default copy constructor */ Graph(const Graph& other) = default; #ifndef _MSC_VER - /*! @brief default move constructor */ + /** @brief default move constructor */ Graph(Graph&& other) = default; #else Graph(Graph&& other) { @@ -48,13 +48,13 @@ class Graph : public GraphInterface { } #endif // _MSC_VER - /*! @brief default assign constructor */ + /** @brief default assign constructor */ Graph& operator=(const Graph& other) = default; - /*! @brief default destructor */ + /** @brief default destructor */ ~Graph() = default; - /*! + /** * @brief Add vertices to the graph. * @note Since vertices are integers enumerated from zero, only the number of * vertices to be added needs to be specified. @@ -62,21 +62,21 @@ class Graph : public GraphInterface { */ void AddVertices(uint64_t num_vertices) override; - /*! + /** * @brief Add one edge to the graph. * @param src The source vertex. * @param dst The destination vertex. */ void AddEdge(dgl_id_t src, dgl_id_t dst) override; - /*! + /** * @brief Add edges to the graph. * @param src_ids The source vertex id array. 
* @param dst_ids The destination vertex id array. */ void AddEdges(IdArray src_ids, IdArray dst_ids) override; - /*! + /** * @brief Clear the graph. Remove all vertices/edges. */ void Clear() override { @@ -92,35 +92,35 @@ class Graph : public GraphInterface { uint8_t NumBits() const override { return 64; } - /*! + /** * @note not const since we have caches * @return whether the graph is a multigraph */ bool IsMultigraph() const override; - /*! + /** * @return whether the graph is read-only */ bool IsReadonly() const override { return false; } - /*! @return the number of vertices in the graph.*/ + /** @return the number of vertices in the graph.*/ uint64_t NumVertices() const override { return adjlist_.size(); } - /*! @return the number of edges in the graph.*/ + /** @return the number of edges in the graph.*/ uint64_t NumEdges() const override { return num_edges_; } - /*! @return a 0-1 array indicating whether the given vertices are in the + /** @return a 0-1 array indicating whether the given vertices are in the * graph. */ BoolArray HasVertices(IdArray vids) const override; - /*! @return true if the given edge is in the graph.*/ + /** @return true if the given edge is in the graph.*/ bool HasEdgeBetween(dgl_id_t src, dgl_id_t dst) const override; - /*! @return a 0-1 array indicating whether the given edges are in the graph.*/ + /** @return a 0-1 array indicating whether the given edges are in the graph.*/ BoolArray HasEdgesBetween(IdArray src_ids, IdArray dst_ids) const override; - /*! + /** * @brief Find the predecessors of a vertex. * @param vid The vertex id. * @param radius The radius of the neighborhood. Default is immediate neighbor @@ -129,7 +129,7 @@ class Graph : public GraphInterface { */ IdArray Predecessors(dgl_id_t vid, uint64_t radius = 1) const override; - /*! + /** * @brief Find the successors of a vertex. * @param vid The vertex id. * @param radius The radius of the neighborhood. 
Default is immediate neighbor @@ -138,7 +138,7 @@ class Graph : public GraphInterface { */ IdArray Successors(dgl_id_t vid, uint64_t radius = 1) const override; - /*! + /** * @brief Get all edge ids between the two given endpoints * @note Edges are associated with an integer id start from zero. * The id is assigned when the edge is being added to the graph. @@ -148,7 +148,7 @@ class Graph : public GraphInterface { */ IdArray EdgeId(dgl_id_t src, dgl_id_t dst) const override; - /*! + /** * @brief Get all edge ids between the given endpoint pairs. * @note Edges are associated with an integer id start from zero. * The id is assigned when the edge is being added to the graph. @@ -159,7 +159,7 @@ class Graph : public GraphInterface { */ EdgeArray EdgeIds(IdArray src, IdArray dst) const override; - /*! + /** * @brief Find the edge ID and return the pair of endpoints * @param eid The edge ID * @return a pair whose first element is the source and the second the @@ -169,7 +169,7 @@ class Graph : public GraphInterface { return std::make_pair(all_edges_src_[eid], all_edges_dst_[eid]); } - /*! + /** * @brief Find the edge IDs and return their source and target node IDs. * @param eids The edge ID array. * @return EdgeArray containing all edges with id in eid. The order is @@ -177,7 +177,7 @@ class Graph : public GraphInterface { */ EdgeArray FindEdges(IdArray eids) const override; - /*! + /** * @brief Get the in edges of the vertex. * @note The returned dst id array is filled with vid. * @param vid The vertex id. @@ -185,14 +185,14 @@ class Graph : public GraphInterface { */ EdgeArray InEdges(dgl_id_t vid) const override; - /*! + /** * @brief Get the in edges of the vertices. * @param vids The vertex id array. * @return the id arrays of the two endpoints of the edges. */ EdgeArray InEdges(IdArray vids) const override; - /*! + /** * @brief Get the out edges of the vertex. * @note The returned src id array is filled with vid. * @param vid The vertex id. 
@@ -200,14 +200,14 @@ class Graph : public GraphInterface { */ EdgeArray OutEdges(dgl_id_t vid) const override; - /*! + /** * @brief Get the out edges of the vertices. * @param vids The vertex id array. * @return the id arrays of the two endpoints of the edges. */ EdgeArray OutEdges(IdArray vids) const override; - /*! + /** * @brief Get all the edges in the graph. * @note If sorted is true, the returned edges list is sorted by their src and * dst ids. Otherwise, they are in their edge id order. @@ -217,7 +217,7 @@ class Graph : public GraphInterface { */ EdgeArray Edges(const std::string& order = "") const override; - /*! + /** * @brief Get the in degree of the given vertex. * @param vid The vertex id. * @return the in degree @@ -227,14 +227,14 @@ class Graph : public GraphInterface { return reverse_adjlist_[vid].succ.size(); } - /*! + /** * @brief Get the in degrees of the given vertices. * @param vid The vertex id array. * @return the in degree array */ DegreeArray InDegrees(IdArray vids) const override; - /*! + /** * @brief Get the out degree of the given vertex. * @param vid The vertex id. * @return the out degree @@ -244,14 +244,14 @@ class Graph : public GraphInterface { return adjlist_[vid].succ.size(); } - /*! + /** * @brief Get the out degrees of the given vertices. * @param vid The vertex id array. * @return the out degree array */ DegreeArray OutDegrees(IdArray vids) const override; - /*! + /** * @brief Construct the induced subgraph of the given vertices. * * The induced subgraph is a subgraph formed by specifying a set of vertices @@ -270,7 +270,7 @@ class Graph : public GraphInterface { */ Subgraph VertexSubgraph(IdArray vids) const override; - /*! + /** * @brief Construct the induced edge subgraph of the given edges. * * The induced edges subgraph is a subgraph formed by specifying a set of @@ -290,7 +290,7 @@ class Graph : public GraphInterface { Subgraph EdgeSubgraph( IdArray eids, bool preserve_nodes = false) const override; - /*! 
+ /** * @brief Return the successor vector * @param vid The vertex id. * @return the successor vector @@ -301,7 +301,7 @@ class Graph : public GraphInterface { return DGLIdIters(data, data + size); } - /*! + /** * @brief Return the out edge id vector * @param vid The vertex id. * @return the out edge id vector @@ -312,7 +312,7 @@ class Graph : public GraphInterface { return DGLIdIters(data, data + size); } - /*! + /** * @brief Return the predecessor vector * @param vid The vertex id. * @return the predecessor vector @@ -323,7 +323,7 @@ class Graph : public GraphInterface { return DGLIdIters(data, data + size); } - /*! + /** * @brief Return the in edge id vector * @param vid The vertex id. * @return the in edge id vector @@ -334,7 +334,7 @@ class Graph : public GraphInterface { return DGLIdIters(data, data + size); } - /*! + /** * @brief Get the adjacency matrix of the graph. * * By default, a row of returned adjacency matrix represents the destination @@ -346,10 +346,10 @@ class Graph : public GraphInterface { std::vector GetAdj( bool transpose, const std::string& fmt) const override; - /*! @brief Create an empty graph */ + /** @brief Create an empty graph */ static MutableGraphPtr Create() { return std::make_shared(); } - /*! @brief Create from coo */ + /** @brief Create from coo */ static MutableGraphPtr CreateFromCOO( int64_t num_nodes, IdArray src_ids, IdArray dst_ids) { return std::make_shared(src_ids, dst_ids, num_nodes); @@ -357,29 +357,29 @@ class Graph : public GraphInterface { protected: friend class GraphOp; - /*! @brief Internal edge list type */ + /** @brief Internal edge list type */ struct EdgeList { - /*! @brief successor vertex list */ + /** @brief successor vertex list */ std::vector succ; - /*! @brief out edge list */ + /** @brief out edge list */ std::vector edge_id; }; typedef std::vector AdjacencyList; - /*! @brief adjacency list using vector storage */ + /** @brief adjacency list using vector storage */ AdjacencyList adjlist_; - /*! 
@brief reverse adjacency list using vector storage */ + /** @brief reverse adjacency list using vector storage */ AdjacencyList reverse_adjlist_; - /*! @brief all edges' src endpoints in their edge id order */ + /** @brief all edges' src endpoints in their edge id order */ std::vector all_edges_src_; - /*! @brief all edges' dst endpoints in their edge id order */ + /** @brief all edges' dst endpoints in their edge id order */ std::vector all_edges_dst_; - /*! @brief read only flag */ + /** @brief read only flag */ bool read_only_ = false; - /*! @brief number of edges */ + /** @brief number of edges */ uint64_t num_edges_ = 0; }; diff --git a/include/dgl/graph_interface.h b/include/dgl/graph_interface.h index bc40c88d2790..93f127c96447 100644 --- a/include/dgl/graph_interface.h +++ b/include/dgl/graph_interface.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2018 by Contributors * @file dgl/graph_interface.h * @brief DGL graph index class. @@ -19,7 +19,7 @@ namespace dgl { const dgl_id_t DGL_INVALID_ID = static_cast(-1); -/*! +/** * @brief This class references data in std::vector. * * This isn't a STL-style iterator. It provides a STL data container interface. @@ -28,9 +28,9 @@ const dgl_id_t DGL_INVALID_ID = static_cast(-1); */ class DGLIdIters { public: - /* !\brief default constructor to create an empty range */ + /** @brief default constructor to create an empty range */ DGLIdIters() {} - /* !\brief constructor with given begin and end */ + /** @brief constructor with given begin and end */ DGLIdIters(const dgl_id_t *begin, const dgl_id_t *end) { this->begin_ = begin; this->end_ = end; @@ -44,15 +44,15 @@ class DGLIdIters { const dgl_id_t *begin_{nullptr}, *end_{nullptr}; }; -/*! 
+/** * @brief int32 version for DGLIdIters * */ class DGLIdIters32 { public: - /* !\brief default constructor to create an empty range */ + /** @brief default constructor to create an empty range */ DGLIdIters32() {} - /* !\brief constructor with given begin and end */ + /** @brief constructor with given begin and end */ DGLIdIters32(const int32_t *begin, const int32_t *end) { this->begin_ = begin; this->end_ = end; @@ -78,7 +78,7 @@ class GraphRef; class GraphInterface; typedef std::shared_ptr GraphPtr; -/*! +/** * @brief dgl graph index interface. * * DGL's graph is directed. Vertices are integers enumerated from zero. @@ -93,7 +93,7 @@ class GraphInterface : public runtime::Object { public: virtual ~GraphInterface() = default; - /*! + /** * @brief Add vertices to the graph. * @note Since vertices are integers enumerated from zero, only the number of * vertices to be added needs to be specified. @@ -101,42 +101,42 @@ class GraphInterface : public runtime::Object { */ virtual void AddVertices(uint64_t num_vertices) = 0; - /*! + /** * @brief Add one edge to the graph. * @param src The source vertex. * @param dst The destination vertex. */ virtual void AddEdge(dgl_id_t src, dgl_id_t dst) = 0; - /*! + /** * @brief Add edges to the graph. * @param src_ids The source vertex id array. * @param dst_ids The destination vertex id array. */ virtual void AddEdges(IdArray src_ids, IdArray dst_ids) = 0; - /*! + /** * @brief Clear the graph. Remove all vertices/edges. */ virtual void Clear() = 0; - /*! + /** * @brief Get the device context of this graph. */ virtual DGLContext Context() const = 0; - /*! + /** * @brief Get the number of integer bits used to store node/edge ids * (32 or 64). */ virtual uint8_t NumBits() const = 0; - /*! + /** * @return whether the graph is a multigraph */ virtual bool IsMultigraph() const = 0; - /*! 
+ /** * @return whether the graph is unibipartite */ virtual bool IsUniBipartite() const { @@ -167,32 +167,32 @@ class GraphInterface : public runtime::Object { return is_unibipartite; } - /*! + /** * @return whether the graph is read-only */ virtual bool IsReadonly() const = 0; - /*! @return the number of vertices in the graph.*/ + /** @return the number of vertices in the graph.*/ virtual uint64_t NumVertices() const = 0; - /*! @return the number of edges in the graph.*/ + /** @return the number of edges in the graph.*/ virtual uint64_t NumEdges() const = 0; - /*! @return true if the given vertex is in the graph.*/ + /** @return true if the given vertex is in the graph.*/ virtual bool HasVertex(dgl_id_t vid) const { return vid < NumVertices(); } - /*! @return a 0-1 array indicating whether the given vertices are in the + /** @return a 0-1 array indicating whether the given vertices are in the * graph. */ virtual BoolArray HasVertices(IdArray vids) const = 0; - /*! @return true if the given edge is in the graph.*/ + /** @return true if the given edge is in the graph.*/ virtual bool HasEdgeBetween(dgl_id_t src, dgl_id_t dst) const = 0; - /*! @return a 0-1 array indicating whether the given edges are in the graph.*/ + /** @return a 0-1 array indicating whether the given edges are in the graph.*/ virtual BoolArray HasEdgesBetween(IdArray src_ids, IdArray dst_ids) const = 0; - /*! + /** * @brief Find the predecessors of a vertex. * @param vid The vertex id. * @param radius The radius of the neighborhood. Default is immediate neighbor @@ -201,7 +201,7 @@ class GraphInterface : public runtime::Object { */ virtual IdArray Predecessors(dgl_id_t vid, uint64_t radius = 1) const = 0; - /*! + /** * @brief Find the successors of a vertex. * @param vid The vertex id. * @param radius The radius of the neighborhood. 
Default is immediate neighbor @@ -210,7 +210,7 @@ class GraphInterface : public runtime::Object { */ virtual IdArray Successors(dgl_id_t vid, uint64_t radius = 1) const = 0; - /*! + /** * @brief Get all edge ids between the two given endpoints * @note Edges are associated with an integer id start from zero. * The id is assigned when the edge is being added to the graph. @@ -220,7 +220,7 @@ class GraphInterface : public runtime::Object { */ virtual IdArray EdgeId(dgl_id_t src, dgl_id_t dst) const = 0; - /*! + /** * @brief Get all edge ids between the given endpoint pairs. * @note Edges are associated with an integer id start from zero. * The id is assigned when the edge is being added to the graph. @@ -231,7 +231,7 @@ class GraphInterface : public runtime::Object { */ virtual EdgeArray EdgeIds(IdArray src, IdArray dst) const = 0; - /*! + /** * @brief Find the edge ID and return the pair of endpoints * @param eid The edge ID * @return a pair whose first element is the source and the second the @@ -239,7 +239,7 @@ class GraphInterface : public runtime::Object { */ virtual std::pair FindEdge(dgl_id_t eid) const = 0; - /*! + /** * @brief Find the edge IDs and return their source and target node IDs. * @param eids The edge ID array. * @return EdgeArray containing all edges with id in eid. The order is @@ -247,7 +247,7 @@ class GraphInterface : public runtime::Object { */ virtual EdgeArray FindEdges(IdArray eids) const = 0; - /*! + /** * @brief Get the in edges of the vertex. * @note The returned dst id array is filled with vid. * @param vid The vertex id. @@ -255,14 +255,14 @@ class GraphInterface : public runtime::Object { */ virtual EdgeArray InEdges(dgl_id_t vid) const = 0; - /*! + /** * @brief Get the in edges of the vertices. * @param vids The vertex id array. * @return the id arrays of the two endpoints of the edges. */ virtual EdgeArray InEdges(IdArray vids) const = 0; - /*! + /** * @brief Get the out edges of the vertex. 
* @note The returned src id array is filled with vid. * @param vid The vertex id. @@ -270,14 +270,14 @@ class GraphInterface : public runtime::Object { */ virtual EdgeArray OutEdges(dgl_id_t vid) const = 0; - /*! + /** * @brief Get the out edges of the vertices. * @param vids The vertex id array. * @return the id arrays of the two endpoints of the edges. */ virtual EdgeArray OutEdges(IdArray vids) const = 0; - /*! + /** * @brief Get all the edges in the graph. * @note If order is "srcdst", the returned edges list is sorted by their src * and dst ids. If order is "eid", they are in their edge id order. @@ -287,35 +287,35 @@ class GraphInterface : public runtime::Object { */ virtual EdgeArray Edges(const std::string &order = "") const = 0; - /*! + /** * @brief Get the in degree of the given vertex. * @param vid The vertex id. * @return the in degree */ virtual uint64_t InDegree(dgl_id_t vid) const = 0; - /*! + /** * @brief Get the in degrees of the given vertices. * @param vid The vertex id array. * @return the in degree array */ virtual DegreeArray InDegrees(IdArray vids) const = 0; - /*! + /** * @brief Get the out degree of the given vertex. * @param vid The vertex id. * @return the out degree */ virtual uint64_t OutDegree(dgl_id_t vid) const = 0; - /*! + /** * @brief Get the out degrees of the given vertices. * @param vid The vertex id array. * @return the out degree array */ virtual DegreeArray OutDegrees(IdArray vids) const = 0; - /*! + /** * @brief Construct the induced subgraph of the given vertices. * * The induced subgraph is a subgraph formed by specifying a set of vertices @@ -334,7 +334,7 @@ class GraphInterface : public runtime::Object { */ virtual Subgraph VertexSubgraph(IdArray vids) const = 0; - /*! + /** * @brief Construct the induced edge subgraph of the given edges. 
* * The induced edges subgraph is a subgraph formed by specifying a set of @@ -356,35 +356,35 @@ class GraphInterface : public runtime::Object { virtual Subgraph EdgeSubgraph( IdArray eids, bool preserve_nodes = false) const = 0; - /*! + /** * @brief Return the successor vector * @param vid The vertex id. * @return the successor vector iterator pair. */ virtual DGLIdIters SuccVec(dgl_id_t vid) const = 0; - /*! + /** * @brief Return the out edge id vector * @param vid The vertex id. * @return the out edge id vector iterator pair. */ virtual DGLIdIters OutEdgeVec(dgl_id_t vid) const = 0; - /*! + /** * @brief Return the predecessor vector * @param vid The vertex id. * @return the predecessor vector iterator pair. */ virtual DGLIdIters PredVec(dgl_id_t vid) const = 0; - /*! + /** * @brief Return the in edge id vector * @param vid The vertex id. * @return the in edge id vector iterator pair. */ virtual DGLIdIters InEdgeVec(dgl_id_t vid) const = 0; - /*! + /** * @brief Get the adjacency matrix of the graph. * * By default, a row of returned adjacency matrix represents the destination @@ -403,7 +403,7 @@ class GraphInterface : public runtime::Object { virtual std::vector GetAdj( bool transpose, const std::string &fmt) const = 0; - /*! + /** * @brief Sort the columns in CSR. * * This sorts the columns in each row based on the column Ids. @@ -418,17 +418,17 @@ class GraphInterface : public runtime::Object { // Define GraphRef DGL_DEFINE_OBJECT_REF(GraphRef, GraphInterface); -/*! @brief Subgraph data structure */ +/** @brief Subgraph data structure */ struct Subgraph : public runtime::Object { - /*! @brief The graph. */ + /** @brief The graph. */ GraphPtr graph; - /*! + /** * @brief The induced vertex ids. * @note This is also a map from the new vertex id to the vertex id in the * parent graph. */ IdArray induced_vertices; - /*! + /** * @brief The induced edge ids. * @note This is also a map from the new edge id to the edge id in the parent * graph. 
@@ -439,21 +439,21 @@ struct Subgraph : public runtime::Object { DGL_DECLARE_OBJECT_TYPE_INFO(Subgraph, runtime::Object); }; -/*! @brief Subgraph data structure for negative subgraph */ +/** @brief Subgraph data structure for negative subgraph */ struct NegSubgraph : public Subgraph { - /*! @brief The existence of the negative edges in the parent graph. */ + /** @brief The existence of the negative edges in the parent graph. */ IdArray exist; - /*! @brief The Ids of head nodes */ + /** @brief The Ids of head nodes */ IdArray head_nid; - /*! @brief The Ids of tail nodes */ + /** @brief The Ids of tail nodes */ IdArray tail_nid; }; -/*! @brief Subgraph data structure for halo subgraph */ +/** @brief Subgraph data structure for halo subgraph */ struct HaloSubgraph : public Subgraph { - /*! @brief Indicate if a node belongs to the partition. */ + /** @brief Indicate if a node belongs to the partition. */ IdArray inner_nodes; }; diff --git a/include/dgl/graph_op.h b/include/dgl/graph_op.h index eaf8fded270d..4bba97a88dee 100644 --- a/include/dgl/graph_op.h +++ b/include/dgl/graph_op.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2018 by Contributors * @file dgl/graph_op.h * @brief Operations on graph index. @@ -15,7 +15,7 @@ namespace dgl { class GraphOp { public: - /*! + /** * @brief Return a new graph with all the edges reversed. * * The returned graph preserves the vertex and edge index in the original @@ -25,7 +25,7 @@ class GraphOp { */ static GraphPtr Reverse(GraphPtr graph); - /*! + /** * @brief Return the line graph. * * If i~j and j~i are two edges in original graph G, then @@ -38,7 +38,7 @@ class GraphOp { */ static GraphPtr LineGraph(GraphPtr graph, bool backtracking); - /*! + /** * @brief Return a disjoint union of the input graphs. * * The new graph will include all the nodes/edges in the given graphs. @@ -55,7 +55,7 @@ class GraphOp { */ static GraphPtr DisjointUnion(std::vector graphs); - /*! + /** * @brief Partition the graph into several subgraphs. 
* * This is a reverse operation of DisjointUnion. The graph will be partitioned @@ -72,7 +72,7 @@ class GraphOp { static std::vector DisjointPartitionByNum( GraphPtr graph, int64_t num); - /*! + /** * @brief Partition the graph into several subgraphs. * * This is a reverse operation of DisjointUnion. The graph will be partitioned @@ -89,7 +89,7 @@ class GraphOp { static std::vector DisjointPartitionBySizes( GraphPtr graph, IdArray sizes); - /*! + /** * @brief Map vids in the parent graph to the vids in the subgraph. * * If the Id doesn't exist in the subgraph, -1 will be used. @@ -102,7 +102,7 @@ class GraphOp { */ static IdArray MapParentIdToSubgraphId(IdArray parent_vid_map, IdArray query); - /*! + /** * @brief Expand an Id array based on the offset array. * * For example, @@ -118,14 +118,14 @@ class GraphOp { */ static IdArray ExpandIds(IdArray ids, IdArray offset); - /*! + /** * @brief Convert the graph to a simple graph. * @param graph The input graph. * @return a new immutable simple graph with no multi-edge. */ static GraphPtr ToSimpleGraph(GraphPtr graph); - /*! + /** * @brief Convert the graph to a mutable bidirected graph. * * If the original graph has m edges for i -> j and n edges for @@ -137,7 +137,7 @@ class GraphOp { */ static GraphPtr ToBidirectedMutableGraph(GraphPtr graph); - /*! + /** * @brief Same as BidirectedMutableGraph except that the returned graph is * immutable. * @param graph The input graph. @@ -145,7 +145,7 @@ class GraphOp { * graph. */ static GraphPtr ToBidirectedImmutableGraph(GraphPtr graph); - /*! + /** * @brief Same as BidirectedMutableGraph except that the returned graph is * immutable and call gk_csr_MakeSymmetric in GKlib. This is more efficient * than ToBidirectedImmutableGraph. It return a null pointer if the conversion @@ -156,7 +156,7 @@ class GraphOp { */ static GraphPtr ToBidirectedSimpleImmutableGraph(ImmutableGraphPtr ig); - /*! + /** * @brief Get a induced subgraph with HALO nodes. 
* The HALO nodes are the ones that can be reached from `nodes` within * `num_hops`. @@ -168,7 +168,7 @@ class GraphOp { static HaloSubgraph GetSubgraphWithHalo( GraphPtr graph, IdArray nodes, int num_hops); - /*! + /** * @brief Reorder the nodes in the immutable graph. * @param graph The input graph. * @param new_order The node Ids in the new graph. The index in `new_order` is diff --git a/include/dgl/graph_serializer.h b/include/dgl/graph_serializer.h index 31addee0c551..42e54017eb88 100644 --- a/include/dgl/graph_serializer.h +++ b/include/dgl/graph_serializer.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2018 by Contributors * @file graph/graph_serializer.cc * @brief DGL serializer APIs diff --git a/include/dgl/graph_traversal.h b/include/dgl/graph_traversal.h index 1aeefa98369f..a042c6f37687 100644 --- a/include/dgl/graph_traversal.h +++ b/include/dgl/graph_traversal.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file dgl/graph_traversal.h * @brief common graph traversal operations @@ -12,7 +12,7 @@ namespace dgl { ///////////////////////// Graph Traverse routines ////////////////////////// -/*! +/** * @brief Class for representing frontiers. * * Each frontier is a list of nodes/edges (specified by their ids). @@ -20,22 +20,22 @@ namespace dgl { * value). */ struct Frontiers { - /*!\brief a vector store for the nodes/edges in all the frontiers */ + /** @brief a vector store for the nodes/edges in all the frontiers */ IdArray ids; - /*! + /** * @brief a vector store for node/edge tags. Dtype is int64. * Empty if no tags are requested */ IdArray tags; - /*!\brief a section vector to indicate each frontier Dtype is int64. */ + /** @brief a section vector to indicate each frontier Dtype is int64. */ IdArray sections; }; namespace aten { -/*! +/** * @brief Traverse the graph in a breadth-first-search (BFS) order. * * @param csr The input csr matrix. 
@@ -44,7 +44,7 @@ namespace aten { */ Frontiers BFSNodesFrontiers(const CSRMatrix& csr, IdArray source); -/*! +/** * @brief Traverse the graph in a breadth-first-search (BFS) order, returning * the edges of the BFS tree. * @@ -54,7 +54,7 @@ Frontiers BFSNodesFrontiers(const CSRMatrix& csr, IdArray source); */ Frontiers BFSEdgesFrontiers(const CSRMatrix& csr, IdArray source); -/*! +/** * @brief Traverse the graph in topological order. * * @param csr The input csr matrix. @@ -62,7 +62,7 @@ Frontiers BFSEdgesFrontiers(const CSRMatrix& csr, IdArray source); */ Frontiers TopologicalNodesFrontiers(const CSRMatrix& csr); -/*! +/** * @brief Traverse the graph in a depth-first-search (DFS) order. * * @param csr The input csr matrix. @@ -71,7 +71,7 @@ Frontiers TopologicalNodesFrontiers(const CSRMatrix& csr); */ Frontiers DGLDFSEdges(const CSRMatrix& csr, IdArray source); -/*! +/** * @brief Traverse the graph in a depth-first-search (DFS) order and return the * recorded edge tag if return_labels is specified. * diff --git a/include/dgl/immutable_graph.h b/include/dgl/immutable_graph.h index b018ed83bd4e..7607ca7c8039 100644 --- a/include/dgl/immutable_graph.h +++ b/include/dgl/immutable_graph.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2018 by Contributors * @file dgl/immutable_graph.h * @brief DGL immutable graph index class. @@ -29,7 +29,7 @@ typedef std::shared_ptr COOPtr; class ImmutableGraph; typedef std::shared_ptr ImmutableGraphPtr; -/*! +/** * @brief Graph class stored using CSR structure. */ class CSR : public GraphInterface { @@ -180,37 +180,37 @@ class CSR : public GraphInterface { return {adj_.indptr, adj_.indices, adj_.data}; } - /*! @brief Indicate whether this uses shared memory. */ + /** @brief Indicate whether this uses shared memory. */ bool IsSharedMem() const { return !shared_mem_name_.empty(); } - /*! 
@brief Return the reverse of this CSR graph (i.e, a CSC graph) */ + /** @brief Return the reverse of this CSR graph (i.e, a CSC graph) */ CSRPtr Transpose() const; - /*! @brief Convert this CSR to COO */ + /** @brief Convert this CSR to COO */ COOPtr ToCOO() const; - /*! + /** * @return the csr matrix that represents this graph. * @note The csr matrix shares the storage with this graph. * The data field of the CSR matrix stores the edge ids. */ aten::CSRMatrix ToCSRMatrix() const { return adj_; } - /*! + /** * @brief Copy the data to another context. * @param ctx The target context. * @return The graph under another context. */ CSR CopyTo(const DGLContext &ctx) const; - /*! + /** * @brief Copy data to shared memory. * @param name The name of the shared memory. * @return The graph in the shared memory */ CSR CopyToSharedMem(const std::string &name) const; - /*! + /** * @brief Convert the graph to use the given number of bits for storage. * @param bits The new number of integer bits (32 or 64). * @return The graph with new bit size storage. @@ -225,10 +225,10 @@ class CSR : public GraphInterface { IdArray edge_ids() const { return adj_.data; } - /*! @return Load CSR from stream */ + /** @return Load CSR from stream */ bool Load(dmlc::Stream *fs); - /*! @return Save CSR to stream */ + /** @return Save CSR to stream */ void Save(dmlc::Stream *fs) const; void SortCSR() override { @@ -239,7 +239,7 @@ class CSR : public GraphInterface { private: friend class Serializer; - /*! @brief private default constructor */ + /** @brief private default constructor */ CSR() { adj_.sorted = false; } // The internal CSR adjacency matrix. // The data field stores edge ids. @@ -424,43 +424,43 @@ class COO : public GraphInterface { } } - /*! @brief Return the transpose of this COO */ + /** @brief Return the transpose of this COO */ COOPtr Transpose() const { return COOPtr(new COO(adj_.num_rows, adj_.col, adj_.row)); } - /*! 
@brief Convert this COO to CSR */ + /** @brief Convert this COO to CSR */ CSRPtr ToCSR() const; - /*! + /** * @brief Get the coo matrix that represents this graph. * @note The coo matrix shares the storage with this graph. * The data field of the coo matrix is none. */ aten::COOMatrix ToCOOMatrix() const { return adj_; } - /*! + /** * @brief Copy the data to another context. * @param ctx The target context. * @return The graph under another context. */ COO CopyTo(const DGLContext &ctx) const; - /*! + /** * @brief Copy data to shared memory. * @param name The name of the shared memory. * @return The graph in the shared memory */ COO CopyToSharedMem(const std::string &name) const; - /*! + /** * @brief Convert the graph to use the given number of bits for storage. * @param bits The new number of integer bits (32 or 64). * @return The graph with new bit size storage. */ COO AsNumBits(uint8_t bits) const; - /*! @brief Indicate whether this uses shared memory. */ + /** @brief Indicate whether this uses shared memory. */ bool IsSharedMem() const { return false; } // member getters @@ -470,7 +470,7 @@ class COO : public GraphInterface { IdArray dst() const { return adj_.col; } private: - /* !\brief private default constructor */ + /** @brief private default constructor */ COO() {} // The internal COO adjacency matrix. @@ -478,17 +478,17 @@ class COO : public GraphInterface { aten::COOMatrix adj_; }; -/*! +/** * @brief DGL immutable graph index class. * * DGL's graph is directed. Vertices are integers enumerated from zero. */ class ImmutableGraph : public GraphInterface { public: - /*! @brief Construct an immutable graph from the COO format. */ + /** @brief Construct an immutable graph from the COO format. */ explicit ImmutableGraph(COOPtr coo) : coo_(coo) {} - /*! + /** * @brief Construct an immutable graph from the CSR format. 
* * For a single graph, we need two CSRs, one stores the in-edges of vertices @@ -506,14 +506,14 @@ class ImmutableGraph : public GraphInterface { CHECK(in_csr_ || out_csr_) << "Both CSR are missing."; } - /*! @brief Construct an immutable graph from one CSR. */ + /** @brief Construct an immutable graph from one CSR. */ explicit ImmutableGraph(CSRPtr csr) : out_csr_(csr) {} - /*! @brief default copy constructor */ + /** @brief default copy constructor */ ImmutableGraph(const ImmutableGraph &other) = default; #ifndef _MSC_VER - /*! @brief default move constructor */ + /** @brief default move constructor */ ImmutableGraph(ImmutableGraph &&other) = default; #else ImmutableGraph(ImmutableGraph &&other) { @@ -526,10 +526,10 @@ class ImmutableGraph : public GraphInterface { } #endif // _MSC_VER - /*! @brief default assign constructor */ + /** @brief default assign constructor */ ImmutableGraph &operator=(const ImmutableGraph &other) = default; - /*! @brief default destructor */ + /** @brief default destructor */ ~ImmutableGraph() = default; void AddVertices(uint64_t num_vertices) override { @@ -552,13 +552,13 @@ class ImmutableGraph : public GraphInterface { uint8_t NumBits() const override { return AnyGraph()->NumBits(); } - /*! + /** * @note not const since we have caches * @return whether the graph is a multigraph */ bool IsMultigraph() const override { return AnyGraph()->IsMultigraph(); } - /*! + /** * @return whether the graph is read-only */ bool IsReadonly() const override { return true; } @@ -577,18 +577,18 @@ class ImmutableGraph : public GraphInterface { return is_unibipartite_; } - /*! @return the number of vertices in the graph.*/ + /** @return the number of vertices in the graph.*/ uint64_t NumVertices() const override { return AnyGraph()->NumVertices(); } - /*! @return the number of edges in the graph.*/ + /** @return the number of edges in the graph.*/ uint64_t NumEdges() const override { return AnyGraph()->NumEdges(); } - /*! 
@return true if the given vertex is in the graph.*/ + /** @return true if the given vertex is in the graph.*/ bool HasVertex(dgl_id_t vid) const override { return vid < NumVertices(); } BoolArray HasVertices(IdArray vids) const override; - /*! @return true if the given edge is in the graph.*/ + /** @return true if the given edge is in the graph.*/ bool HasEdgeBetween(dgl_id_t src, dgl_id_t dst) const override { if (in_csr_) { return in_csr_->HasEdgeBetween(dst, src); @@ -605,7 +605,7 @@ class ImmutableGraph : public GraphInterface { } } - /*! + /** * @brief Find the predecessors of a vertex. * @param vid The vertex id. * @param radius The radius of the neighborhood. Default is immediate neighbor @@ -616,7 +616,7 @@ class ImmutableGraph : public GraphInterface { return GetInCSR()->Successors(vid, radius); } - /*! + /** * @brief Find the successors of a vertex. * @param vid The vertex id. * @param radius The radius of the neighborhood. Default is immediate neighbor @@ -627,7 +627,7 @@ class ImmutableGraph : public GraphInterface { return GetOutCSR()->Successors(vid, radius); } - /*! + /** * @brief Get all edge ids between the two given endpoints * @note Edges are associated with an integer id start from zero. * The id is assigned when the edge is being added to the graph. @@ -643,7 +643,7 @@ class ImmutableGraph : public GraphInterface { } } - /*! + /** * @brief Get all edge ids between the given endpoint pairs. * @note Edges are associated with an integer id start from zero. * The id is assigned when the edge is being added to the graph. @@ -661,7 +661,7 @@ class ImmutableGraph : public GraphInterface { } } - /*! + /** * @brief Find the edge ID and return the pair of endpoints * @param eid The edge ID * @return a pair whose first element is the source and the second the @@ -671,7 +671,7 @@ class ImmutableGraph : public GraphInterface { return GetCOO()->FindEdge(eid); } - /*! + /** * @brief Find the edge IDs and return their source and target node IDs. 
* @param eids The edge ID array. * @return EdgeArray containing all edges with id in eid. The order is @@ -681,7 +681,7 @@ class ImmutableGraph : public GraphInterface { return GetCOO()->FindEdges(eids); } - /*! + /** * @brief Get the in edges of the vertex. * @note The returned dst id array is filled with vid. * @param vid The vertex id. @@ -692,7 +692,7 @@ class ImmutableGraph : public GraphInterface { return {ret.dst, ret.src, ret.id}; } - /*! + /** * @brief Get the in edges of the vertices. * @param vids The vertex id array. * @return the id arrays of the two endpoints of the edges. @@ -702,7 +702,7 @@ class ImmutableGraph : public GraphInterface { return {ret.dst, ret.src, ret.id}; } - /*! + /** * @brief Get the out edges of the vertex. * @note The returned src id array is filled with vid. * @param vid The vertex id. @@ -712,7 +712,7 @@ class ImmutableGraph : public GraphInterface { return GetOutCSR()->OutEdges(vid); } - /*! + /** * @brief Get the out edges of the vertices. * @param vids The vertex id array. * @return the id arrays of the two endpoints of the edges. @@ -721,7 +721,7 @@ class ImmutableGraph : public GraphInterface { return GetOutCSR()->OutEdges(vids); } - /*! + /** * @brief Get all the edges in the graph. * @note If sorted is true, the returned edges list is sorted by their src and * dst ids. Otherwise, they are in their edge id order. @@ -731,7 +731,7 @@ class ImmutableGraph : public GraphInterface { */ EdgeArray Edges(const std::string &order = "") const override; - /*! + /** * @brief Get the in degree of the given vertex. * @param vid The vertex id. * @return the in degree @@ -740,7 +740,7 @@ class ImmutableGraph : public GraphInterface { return GetInCSR()->OutDegree(vid); } - /*! + /** * @brief Get the in degrees of the given vertices. * @param vid The vertex id array. * @return the in degree array @@ -749,7 +749,7 @@ class ImmutableGraph : public GraphInterface { return GetInCSR()->OutDegrees(vids); } - /*! 
+ /** * @brief Get the out degree of the given vertex. * @param vid The vertex id. * @return the out degree @@ -758,7 +758,7 @@ class ImmutableGraph : public GraphInterface { return GetOutCSR()->OutDegree(vid); } - /*! + /** * @brief Get the out degrees of the given vertices. * @param vid The vertex id array. * @return the out degree array @@ -767,7 +767,7 @@ class ImmutableGraph : public GraphInterface { return GetOutCSR()->OutDegrees(vids); } - /*! + /** * @brief Construct the induced subgraph of the given vertices. * * The induced subgraph is a subgraph formed by specifying a set of vertices @@ -786,7 +786,7 @@ class ImmutableGraph : public GraphInterface { */ Subgraph VertexSubgraph(IdArray vids) const override; - /*! + /** * @brief Construct the induced edge subgraph of the given edges. * * The induced edges subgraph is a subgraph formed by specifying a set of @@ -806,7 +806,7 @@ class ImmutableGraph : public GraphInterface { Subgraph EdgeSubgraph( IdArray eids, bool preserve_nodes = false) const override; - /*! + /** * @brief Return the successor vector * @param vid The vertex id. * @return the successor vector @@ -815,7 +815,7 @@ class ImmutableGraph : public GraphInterface { return GetOutCSR()->SuccVec(vid); } - /*! + /** * @brief Return the out edge id vector * @param vid The vertex id. * @return the out edge id vector @@ -824,7 +824,7 @@ class ImmutableGraph : public GraphInterface { return GetOutCSR()->OutEdgeVec(vid); } - /*! + /** * @brief Return the predecessor vector * @param vid The vertex id. * @return the predecessor vector @@ -833,7 +833,7 @@ class ImmutableGraph : public GraphInterface { return GetInCSR()->SuccVec(vid); } - /*! + /** * @brief Return the in edge id vector * @param vid The vertex id. * @return the in edge id vector @@ -842,7 +842,7 @@ class ImmutableGraph : public GraphInterface { return GetInCSR()->OutEdgeVec(vid); } - /*! + /** * @brief Get the adjacency matrix of the graph. 
* * By default, a row of returned adjacency matrix represents the destination @@ -854,28 +854,28 @@ class ImmutableGraph : public GraphInterface { std::vector GetAdj( bool transpose, const std::string &fmt) const override; - /* !\brief Return in csr. If not exist, transpose the other one.*/ + /** @brief Return in csr. If not exist, transpose the other one.*/ CSRPtr GetInCSR() const; - /* !\brief Return out csr. If not exist, transpose the other one.*/ + /** @brief Return out csr. If not exist, transpose the other one.*/ CSRPtr GetOutCSR() const; - /* !\brief Return coo. If not exist, create from csr.*/ + /** @brief Return coo. If not exist, create from csr.*/ COOPtr GetCOO() const; - /*! @brief Create an immutable graph from CSR. */ + /** @brief Create an immutable graph from CSR. */ static ImmutableGraphPtr CreateFromCSR( IdArray indptr, IdArray indices, IdArray edge_ids, const std::string &edge_dir); static ImmutableGraphPtr CreateFromCSR(const std::string &shared_mem_name); - /*! @brief Create an immutable graph from COO. */ + /** @brief Create an immutable graph from COO. */ static ImmutableGraphPtr CreateFromCOO( int64_t num_vertices, IdArray src, IdArray dst, bool row_osrted = false, bool col_sorted = false); - /*! + /** * @brief Convert the given graph to an immutable graph. * * If the graph is already an immutable graph. The result graph will share @@ -886,14 +886,14 @@ class ImmutableGraph : public GraphInterface { */ static ImmutableGraphPtr ToImmutable(GraphPtr graph); - /*! + /** * @brief Copy the data to another context. * @param ctx The target context. * @return The graph under another context. */ static ImmutableGraphPtr CopyTo(ImmutableGraphPtr g, const DGLContext &ctx); - /*! + /** * @brief Copy data to shared memory. * @param name The name of the shared memory. 
* @return The graph in the shared memory @@ -901,14 +901,14 @@ class ImmutableGraph : public GraphInterface { static ImmutableGraphPtr CopyToSharedMem( ImmutableGraphPtr g, const std::string &name); - /*! + /** * @brief Convert the graph to use the given number of bits for storage. * @param bits The new number of integer bits (32 or 64). * @return The graph with new bit size storage. */ static ImmutableGraphPtr AsNumBits(ImmutableGraphPtr g, uint8_t bits); - /*! + /** * @brief Return a new graph with all the edges reversed. * * The returned graph preserves the vertex and edge index in the original @@ -918,10 +918,10 @@ class ImmutableGraph : public GraphInterface { */ ImmutableGraphPtr Reverse() const; - /*! @return Load ImmutableGraph from stream, using out csr */ + /** @return Load ImmutableGraph from stream, using out csr */ bool Load(dmlc::Stream *fs); - /*! @return Save ImmutableGraph to stream, using out csr */ + /** @return Save ImmutableGraph to stream, using out csr */ void Save(dmlc::Stream *fs) const; void SortCSR() override { @@ -933,17 +933,17 @@ class ImmutableGraph : public GraphInterface { bool HasOutCSR() const { return out_csr_ != NULL; } - /*! 
@brief Cast this graph to a heterograph */ + /** @brief Cast this graph to a heterograph */ HeteroGraphPtr AsHeteroGraph() const; protected: friend class Serializer; friend class UnitGraph; - /* !\brief internal default constructor */ + /** @brief internal default constructor */ ImmutableGraph() {} - /* !\brief internal constructor for all the members */ + /** @brief internal constructor for all the members */ ImmutableGraph(CSRPtr in_csr, CSRPtr out_csr, COOPtr coo) : in_csr_(in_csr), out_csr_(out_csr), coo_(coo) { CHECK(AnyGraph()) << "At least one graph structure should exist."; @@ -956,7 +956,7 @@ class ImmutableGraph : public GraphInterface { this->shared_mem_name_ = shared_mem_name; } - /* !\brief return pointer to any available graph structure */ + /** @brief return pointer to any available graph structure */ GraphPtr AnyGraph() const { if (in_csr_) { return in_csr_; diff --git a/include/dgl/kernel.h b/include/dgl/kernel.h index f2c3fda37a4f..73c75084ad52 100644 --- a/include/dgl/kernel.h +++ b/include/dgl/kernel.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file dgl/aten/kernel.h * @brief Sparse matrix operators. @@ -17,7 +17,7 @@ namespace dgl { namespace aten { -/*! +/** * @brief Generalized Sparse Matrix-Matrix Multiplication. * @param op The binary operator, could be `add`, `sub', `mul`, 'div', * `copy_u`, `copy_e'. @@ -34,7 +34,7 @@ void SpMM( const std::string& op, const std::string& reduce, HeteroGraphPtr graph, NDArray ufeat, NDArray efeat, NDArray out, std::vector out_aux); -/*! +/** * @brief Generalized Sampled Dense-Dense Matrix Multiplication. * @param op The binary operator, could be `add`, `sub', `mul`, 'div', * `dot`, `copy_u`, `copy_e'. @@ -47,7 +47,7 @@ void SDDMM( const std::string& op, HeteroGraphPtr graph, NDArray ufeat, NDArray efeat, NDArray out); -/*! +/** * @brief Sparse-sparse matrix multiplication. * * The sparse matrices must have scalar weights (i.e. 
\a A_weights and \a @@ -56,7 +56,7 @@ void SDDMM( std::pair CSRMM( CSRMatrix A, NDArray A_weights, CSRMatrix B, NDArray B_weights); -/*! +/** * @brief Summing up a list of sparse matrices. * * The sparse matrices must have scalar weights (i.e. the arrays in \a A_weights diff --git a/include/dgl/lazy.h b/include/dgl/lazy.h index d96ea9e1892c..db1dd3b67a2d 100644 --- a/include/dgl/lazy.h +++ b/include/dgl/lazy.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file dgl/lazy.h * @brief Lazy object that will be materialized only when being queried. @@ -10,7 +10,7 @@ namespace dgl { -/*! +/** * @brief Lazy object that will be materialized only when being queried. * * The object should be immutable -- no mutation once materialized. @@ -19,18 +19,18 @@ namespace dgl { template class Lazy { public: - /*!\brief default constructor to construct a lazy object */ + /** @brief default constructor to construct a lazy object */ Lazy() {} - /*! + /** * @brief constructor to construct an object with given value (non-lazy case) */ explicit Lazy(const T& val) : ptr_(new T(val)) {} - /*!\brief destructor */ + /** @brief destructor */ ~Lazy() = default; - /*! + /** * @brief Get the value of this object. If the object has not been * instantiated, using the provided function to create it. * @param fn The creator function. @@ -45,7 +45,7 @@ class Lazy { } private: - /*!\brief the internal data pointer */ + /** @brief the internal data pointer */ std::shared_ptr ptr_{nullptr}; }; diff --git a/include/dgl/nodeflow.h b/include/dgl/nodeflow.h index b9e325161186..9829f1f83db5 100644 --- a/include/dgl/nodeflow.h +++ b/include/dgl/nodeflow.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file dgl/nodeflow.h * @brief DGL NodeFlow class. @@ -17,7 +17,7 @@ namespace dgl { class ImmutableGraph; -/*! +/** * @brief A NodeFlow graph stores the sampling results for a sampler that * samples nodes/edges in layers. 
* @@ -26,21 +26,21 @@ class ImmutableGraph; * node and edge mapping from the NodeFlow graph to the parent graph. */ struct NodeFlowObject : public runtime::Object { - /*! @brief The graph. */ + /** @brief The graph. */ GraphPtr graph; - /*! + /** * @brief the offsets of each layer. */ IdArray layer_offsets; - /*! + /** * @brief the offsets of each flow. */ IdArray flow_offsets; - /*! + /** * @brief The node mapping from the NodeFlow graph to the parent graph. */ IdArray node_mapping; - /*! + /** * @brief The edge mapping from the NodeFlow graph to the parent graph. */ IdArray edge_mapping; @@ -54,13 +54,13 @@ class NodeFlow : public runtime::ObjectRef { public: DGL_DEFINE_OBJECT_REF_METHODS(NodeFlow, runtime::ObjectRef, NodeFlowObject); - /*! @brief create a new nodeflow reference */ + /** @brief create a new nodeflow reference */ static NodeFlow Create() { return NodeFlow(std::make_shared()); } }; -/*! +/** * @brief Get a slice on a graph that represents a NodeFlow. * * The entire block has to be taken as a slice. Users have to specify the diff --git a/include/dgl/packed_func_ext.h b/include/dgl/packed_func_ext.h index 8539960485ea..79aec15cbe34 100644 --- a/include/dgl/packed_func_ext.h +++ b/include/dgl/packed_func_ext.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file packed_func_ext.h * @brief Extension package to PackedFunc @@ -18,7 +18,7 @@ namespace dgl { namespace runtime { -/*! +/** * @brief Runtime type checker for node type. * @tparam T the type to be checked. */ diff --git a/include/dgl/random.h b/include/dgl/random.h index 67bbb3684a36..baa8bc2b9748 100644 --- a/include/dgl/random.h +++ b/include/dgl/random.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2017 by Contributors * @file dgl/random.h * @brief Random number generators @@ -35,36 +35,36 @@ inline uint32_t GetThreadId() { }; // namespace -/*! +/** * @brief Thread-local Random Number Generator class */ class RandomEngine { public: - /*! 
@brief Constructor with default seed */ + /** @brief Constructor with default seed */ RandomEngine() { std::random_device rd; SetSeed(rd()); } - /*! @brief Constructor with given seed */ + /** @brief Constructor with given seed */ explicit RandomEngine(uint32_t seed) { SetSeed(seed); } - /*! @brief Get the thread-local random number generator instance */ + /** @brief Get the thread-local random number generator instance */ static RandomEngine* ThreadLocal() { return dmlc::ThreadLocalStore::Get(); } - /*! + /** * @brief Set the seed of this random number generator */ void SetSeed(uint32_t seed) { rng_.seed(seed + GetThreadId()); } - /*! + /** * @brief Generate an arbitrary random 32-bit integer. */ int32_t RandInt32() { return static_cast(rng_()); } - /*! + /** * @brief Generate a uniform random integer in [0, upper) */ template @@ -72,7 +72,7 @@ class RandomEngine { return RandInt(0, upper); } - /*! + /** * @brief Generate a uniform random integer in [lower, upper) */ template @@ -82,7 +82,7 @@ class RandomEngine { return dist(rng_); } - /*! + /** * @brief Generate a uniform random float in [0, 1) */ template @@ -90,7 +90,7 @@ class RandomEngine { return Uniform(0., 1.); } - /*! + /** * @brief Generate a uniform random float in [lower, upper) */ template @@ -102,7 +102,7 @@ class RandomEngine { return dist(rng_); } - /*! + /** * @brief Pick a random integer between 0 to N-1 according to given * probabilities. * @tparam IdxType Return integer type. @@ -113,7 +113,7 @@ class RandomEngine { template IdxType Choice(FloatArray prob); - /*! + /** * @brief Pick random integers between 0 to N-1 according to given * probabilities * @@ -130,7 +130,7 @@ class RandomEngine { template void Choice(IdxType num, FloatArray prob, IdxType* out, bool replace = true); - /*! + /** * @brief Pick random integers between 0 to N-1 according to given * probabilities * @@ -153,7 +153,7 @@ class RandomEngine { return ret; } - /*! 
+ /** * @brief Pick random integers from population by uniform distribution. * * If replace is false, num must not be larger than population. @@ -168,7 +168,7 @@ class RandomEngine { void UniformChoice( IdxType num, IdxType population, IdxType* out, bool replace = true); - /*! + /** * @brief Pick random integers from population by uniform distribution. * * If replace is false, num must not be larger than population. @@ -189,7 +189,7 @@ class RandomEngine { return ret; } - /*! + /** * @brief Pick random integers with different probability for different * segments. * @@ -223,7 +223,7 @@ class RandomEngine { IdxType num, const IdxType* split, FloatArray bias, IdxType* out, bool replace = true); - /*! + /** * @brief Pick random integers with different probability for different * segments. * diff --git a/include/dgl/runtime/c_backend_api.h b/include/dgl/runtime/c_backend_api.h index 3ac785b784d8..0b7e5fffad82 100644 --- a/include/dgl/runtime/c_backend_api.h +++ b/include/dgl/runtime/c_backend_api.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2017 by Contributors * @file dgl/runtime/c_backend_api.h * @brief DGL runtime backend API. @@ -17,7 +17,7 @@ extern "C" { #endif // Backend related functions. -/*! +/** * @brief Backend function for modules to get function * from its environment mod_node (its imports and global function). * The user do should not call DGLFuncFree on func. @@ -29,7 +29,7 @@ extern "C" { */ DGL_DLL int DGLBackendGetFuncFromEnv( void* mod_node, const char* func_name, DGLFunctionHandle* out); -/*! +/** * @brief Backend function to register system-wide library symbol. * * @param name The name of the symbol @@ -38,7 +38,7 @@ DGL_DLL int DGLBackendGetFuncFromEnv( */ DGL_DLL int DGLBackendRegisterSystemLibSymbol(const char* name, void* ptr); -/*! +/** * @brief Backend function to allocate temporal workspace. 
* * @note The result allocate spaced is ensured to be aligned to @@ -57,7 +57,7 @@ DGL_DLL void* DGLBackendAllocWorkspace( int device_type, int device_id, uint64_t nbytes, int dtype_code_hint, int dtype_bits_hint); -/*! +/** * @brief Backend function to free temporal workspace. * * @param ptr The result allocated space pointer. @@ -69,19 +69,19 @@ DGL_DLL void* DGLBackendAllocWorkspace( */ DGL_DLL int DGLBackendFreeWorkspace(int device_type, int device_id, void* ptr); -/*! +/** * @brief Environment for DGL parallel task. */ typedef struct { - /*! + /** * @brief Auxiliary used for synchronization */ void* sync_handle; - /*! @brief total amount of task */ + /** @brief total amount of task */ int32_t num_task; } DGLParallelGroupEnv; -/*! +/** * @brief The callback function to execute a parallel lambda * @param task_id the task id of the function. * @param penv The parallel environment backs the execution. @@ -90,7 +90,7 @@ typedef struct { typedef int (*FDGLParallelLambda)( int task_id, DGLParallelGroupEnv* penv, void* cdata); -/*! +/** * @brief Backend function for running parallel jobs. * * @param flambda The parallel function to be launched. @@ -103,7 +103,7 @@ typedef int (*FDGLParallelLambda)( DGL_DLL int DGLBackendParallelLaunch( FDGLParallelLambda flambda, void* cdata, int num_task); -/*! +/** * @brief BSP barrrier between parallel threads * @param task_id the task id of the function. * @param penv The parallel environment backs the execution. @@ -111,7 +111,7 @@ DGL_DLL int DGLBackendParallelLaunch( */ DGL_DLL int DGLBackendParallelBarrier(int task_id, DGLParallelGroupEnv* penv); -/*! +/** * @brief Simple static initialization fucntion. * Run f once and set handle to be not null. * This function is mainly used for test purpose. diff --git a/include/dgl/runtime/c_object_api.h b/include/dgl/runtime/c_object_api.h index 651c1938dfb8..e83b5845960e 100644 --- a/include/dgl/runtime/c_object_api.h +++ b/include/dgl/runtime/c_object_api.h @@ -1,4 +1,4 @@ -/*! 
+/** * Copyright (c) 2019 by Contributors * @file dgl/runtime/c_object_api.h * @@ -16,17 +16,17 @@ extern "C" { #endif -/*! @brief handle to object */ +/** @brief handle to object */ typedef void* ObjectHandle; -/*! +/** * @brief free the object handle * @param handle The object handle to be freed. * @return 0 when success, -1 when failure happens */ DGL_DLL int DGLObjectFree(ObjectHandle handle); -/*! +/** * @brief Convert type key to type index. * @param type_key The key of the type. * @param out_index the corresponding type index. @@ -34,7 +34,7 @@ DGL_DLL int DGLObjectFree(ObjectHandle handle); */ DGL_DLL int DGLObjectTypeKey2Index(const char* type_key, int* out_index); -/*! +/** * @brief Get runtime type index of the object. * @param handle the object handle. * @param out_index the corresponding type index. @@ -42,7 +42,7 @@ DGL_DLL int DGLObjectTypeKey2Index(const char* type_key, int* out_index); */ DGL_DLL int DGLObjectGetTypeIndex(ObjectHandle handle, int* out_index); -/*! +/** * @brief get attributes given key * @param handle The object handle * @param key The attribute name @@ -56,7 +56,7 @@ DGL_DLL int DGLObjectGetAttr( ObjectHandle handle, const char* key, DGLValue* out_value, int* out_type_code, int* out_success); -/*! +/** * @brief get attributes names in the object. * @param handle The object handle * @param out_size The number of functions diff --git a/include/dgl/runtime/c_runtime_api.h b/include/dgl/runtime/c_runtime_api.h index aa86ab0c49c8..fd5c9586e767 100644 --- a/include/dgl/runtime/c_runtime_api.h +++ b/include/dgl/runtime/c_runtime_api.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2016-2022 by Contributors * @file dgl/runtime/c_runtime_api.h * @brief DGL runtime library. @@ -41,10 +41,10 @@ extern "C" { #include #include -/*! @brief type of array index. */ +/** @brief type of array index. */ typedef int64_t dgl_index_t; -/*! +/** * @brief The device type in DGLContext. 
*/ #ifdef __cplusplus @@ -52,14 +52,14 @@ typedef enum : int32_t { #else typedef enum { #endif - /*! @brief CPU device */ + /** @brief CPU device */ kDGLCPU = 1, - /*! @brief CUDA GPU device */ + /** @brief CUDA GPU device */ kDGLCUDA = 2, // add more devices once supported } DGLDeviceType; -/*! +/** * @brief The object type code is used in DGL FFI to indicate the types of * objects passed between C and Python. */ @@ -90,22 +90,22 @@ typedef enum { kExtEnd = 128U } DGLObjectTypeCode; -/*! +/** * @brief The type code options DGLDataType. */ typedef enum { - /*! @brief signed integer */ + /** @brief signed integer */ kDGLInt = 0U, - /*! @brief unsigned integer */ + /** @brief unsigned integer */ kDGLUInt = 1U, - /*! @brief IEEE floating point */ + /** @brief IEEE floating point */ kDGLFloat = 2U, - /*! @brief bfloat16 */ + /** @brief bfloat16 */ kDGLBfloat = 4U, // add more data types if we are going to support them } DGLDataTypeCode; -/*! +/** * @brief The data type the tensor can hold. The data type is assumed to follow * the native endian-ness. An explicit error message should be raised when * attempting to export an array with non-native endianness @@ -116,39 +116,39 @@ typedef enum { * - int8: type_code = 0, bits = 8, lanes=1 */ typedef struct { - /*! + /** * @brief Type code of base types. * We keep it uint8_t instead of DGLDataTypeCode for minimal memory * footprint, but the value should be one of DGLDataTypeCode enum values. * */ uint8_t code; - /*! + /** * @brief Number of bits, common choices are 8, 16, 32. */ uint8_t bits; - /*! @brief Number of lanes in the type, used for vector types. */ + /** @brief Number of lanes in the type, used for vector types. */ uint16_t lanes; } DGLDataType; -/*! +/** * @brief The Device information, abstract away common device types. */ typedef struct { - /*! @brief The device type used in the device. */ + /** @brief The device type used in the device. */ DGLDeviceType device_type; - /*! + /** * @brief The device index. 
* For vanilla CPU memory, pinned memory, or managed memory, this is set to 0. */ int32_t device_id; } DGLContext; -/*! +/** * @brief The tensor array stucture to DGL API. * The structure is heavily inspired by DLTensor from DLPack. */ typedef struct { - /*! + /** * @brief The data pointer points to the allocated data. * * Depending on the device context, it can be a CPU pointer, or a CUDA @@ -178,27 +178,27 @@ typedef struct { * @endcode */ void* data; - /*! @brief The device of the tensor */ + /** @brief The device of the tensor */ DGLContext ctx; - /*! @brief Number of dimensions */ + /** @brief Number of dimensions */ int32_t ndim; - /*! @brief The data type of the pointer*/ + /** @brief The data type of the pointer*/ DGLDataType dtype; - /*! @brief The shape of the tensor */ + /** @brief The shape of the tensor */ int64_t* shape; - /*! + /** * @brief strides of the tensor (in number of elements, not bytes) * can be NULL, indicating tensor is compact and row-majored. */ int64_t* strides; - /*! @brief The offset in bytes to the beginning pointer to data */ + /** @brief The offset in bytes to the beginning pointer to data */ uint64_t byte_offset; } DGLArray; -/*! @brief the array handle */ +/** @brief the array handle */ typedef DGLArray* DGLArrayHandle; -/*! +/** * @brief Union type of values * being passed through API and function calls. */ @@ -211,7 +211,7 @@ typedef union { DGLContext v_ctx; } DGLValue; -/*! +/** * @brief Byte array type used to pass in byte array * When kBytes is used as data type. */ @@ -220,26 +220,26 @@ typedef struct { size_t size; } DGLByteArray; -/*! @brief Handle to DGL runtime modules. */ +/** @brief Handle to DGL runtime modules. */ typedef void* DGLModuleHandle; -/*! @brief Handle to packed function handle. */ +/** @brief Handle to packed function handle. */ typedef void* DGLFunctionHandle; -/*! @brief Handle to hold return value. */ +/** @brief Handle to hold return value. */ typedef void* DGLRetValueHandle; -/*! 
+/** * @brief The stream that is specific to device * can be NULL, which indicates the default one. */ typedef void* DGLStreamHandle; -/*! +/** * @brief Used for implementing C API function. * Set last error message before return. * @param msg The error message to be set. */ DGL_DLL void DGLAPISetLastError(const char* msg); -/*! +/** * @brief return str message of the last error * all function in this file will return 0 when success * and -1 when an error occured, @@ -249,7 +249,7 @@ DGL_DLL void DGLAPISetLastError(const char* msg); * \return error info */ DGL_DLL const char* DGLGetLastError(void); -/*! +/** * @brief Load module from file. * @param file_name The file name to load the module from. * @param format The format of the module. @@ -262,7 +262,7 @@ DGL_DLL const char* DGLGetLastError(void); DGL_DLL int DGLModLoadFromFile( const char* file_name, const char* format, DGLModuleHandle* out); -/*! +/** * @brief Add dep to mod's dependency. * This allows functions in this module to use modules. * @@ -272,7 +272,7 @@ DGL_DLL int DGLModLoadFromFile( */ DGL_DLL int DGLModImport(DGLModuleHandle mod, DGLModuleHandle dep); -/*! +/** * @brief Get function from the module. * @param mod The module handle. * @param func_name The name of the function. @@ -284,7 +284,7 @@ DGL_DLL int DGLModGetFunction( DGLModuleHandle mod, const char* func_name, int query_imports, DGLFunctionHandle* out); -/*! +/** * @brief Free front-end extension type resource. * @param handle The extension handle. * @param type_code The type of of the extension type. @@ -292,7 +292,7 @@ DGL_DLL int DGLModGetFunction( */ DGL_DLL int DGLExtTypeFree(void* handle, int type_code); -/*! +/** * @brief Free the Module * @param mod The module to be freed. * @@ -305,14 +305,14 @@ DGL_DLL int DGLExtTypeFree(void* handle, int type_code); */ DGL_DLL int DGLModFree(DGLModuleHandle mod); -/*! +/** * @brief Free the function when it is no longer needed. 
* @param func The function handle * @return 0 when success, -1 when failure happens */ DGL_DLL int DGLFuncFree(DGLFunctionHandle func); -/*! +/** * @brief Call a Packed DGL Function. * * @param func node handle of the function. @@ -336,7 +336,7 @@ DGL_DLL int DGLFuncCall( DGLFunctionHandle func, DGLValue* arg_values, int* type_codes, int num_args, DGLValue* ret_val, int* ret_type_code); -/*! +/** * @brief Set the return value of DGLPackedCFunc. * * This function is called by DGLPackedCFunc to set the return value. @@ -350,7 +350,7 @@ DGL_DLL int DGLFuncCall( DGL_DLL int DGLCFuncSetReturn( DGLRetValueHandle ret, DGLValue* value, int* type_code, int num_ret); -/*! +/** * @brief Inplace translate callback argument value to return value. * This is only needed for non-POD arguments. * @@ -362,7 +362,7 @@ DGL_DLL int DGLCFuncSetReturn( */ DGL_DLL int DGLCbArgToReturn(DGLValue* value, int code); -/*! +/** * @brief C type of packed function. * * @param args The arguments @@ -378,13 +378,13 @@ typedef int (*DGLPackedCFunc)( DGLValue* args, int* type_codes, int num_args, DGLRetValueHandle ret, void* resource_handle); -/*! +/** * @brief C callback to free the resource handle in C packed function. * @param resource_handle The handle additional resouce handle from fron-end. */ typedef void (*DGLPackedCFuncFinalizer)(void* resource_handle); -/*! +/** * @brief Signature for extension function declarer. * * DGL call this function to get the extension functions @@ -395,7 +395,7 @@ typedef void (*DGLPackedCFuncFinalizer)(void* resource_handle); */ typedef int (*DGLExtensionFuncDeclarer)(DGLFunctionHandle register_func_handle); -/*! +/** * @brief Wrap a DGLPackedCFunc to become a FunctionHandle. * * The resource_handle will be managed by DGL API, until the function is no @@ -412,7 +412,7 @@ DGL_DLL int DGLFuncCreateFromCFunc( DGLPackedCFunc func, void* resource_handle, DGLPackedCFuncFinalizer fin, DGLFunctionHandle* out); -/*! 
+/** * @brief Register the function to runtime's global table. * * The registered function then can be pulled by the backend by the name. @@ -424,7 +424,7 @@ DGL_DLL int DGLFuncCreateFromCFunc( DGL_DLL int DGLFuncRegisterGlobal( const char* name, DGLFunctionHandle f, int override); -/*! +/** * @brief Get a global function. * * @param name The name of the function. @@ -435,7 +435,7 @@ DGL_DLL int DGLFuncRegisterGlobal( */ DGL_DLL int DGLFuncGetGlobal(const char* name, DGLFunctionHandle* out); -/*! +/** * @brief List all the globally registered function name * @param out_size The number of functions * @param out_array The array of function names. @@ -444,7 +444,7 @@ DGL_DLL int DGLFuncGetGlobal(const char* name, DGLFunctionHandle* out); DGL_DLL int DGLFuncListGlobalNames(int* out_size, const char*** out_array); // Array related apis for quick proptyping -/*! +/** * @brief Allocate a nd-array's memory, * including space of shape, of given spec. * @@ -462,7 +462,7 @@ DGL_DLL int DGLArrayAlloc( const dgl_index_t* shape, int ndim, int dtype_code, int dtype_bits, int dtype_lanes, int device_type, int device_id, DGLArrayHandle* out); -/*! +/** * @brief Allocate a nd-array's with shared memory, * including space of shape, of given spec. * @@ -480,14 +480,14 @@ int DGLArrayAllocSharedMem( const char* mem_name, const dgl_index_t* shape, int ndim, int dtype_code, int dtype_bits, int dtype_lanes, bool is_create, DGLArrayHandle* out); -/*! +/** * @brief Free the DGL Array. * @param handle The array handle to be freed. * @return 0 when success, -1 when failure happens */ DGL_DLL int DGLArrayFree(DGLArrayHandle handle); -/*! +/** * @brief Copy array data from CPU byte array. * @param handle The array handle. * @param data the data pointer @@ -497,7 +497,7 @@ DGL_DLL int DGLArrayFree(DGLArrayHandle handle); DGL_DLL int DGLArrayCopyFromBytes( DGLArrayHandle handle, void* data, size_t nbytes); -/*! +/** * @brief Copy array data to CPU byte array. * @param handle The array handle. 
* @param data the data pointer @@ -507,7 +507,7 @@ DGL_DLL int DGLArrayCopyFromBytes( DGL_DLL int DGLArrayCopyToBytes( DGLArrayHandle handle, void* data, size_t nbytes); -/*! +/** * @brief Copy the array, both from and to must be valid during the copy. * @param from The array to be copied from. * @param to The target space. @@ -515,7 +515,7 @@ DGL_DLL int DGLArrayCopyToBytes( */ DGL_DLL int DGLArrayCopyFromTo(DGLArrayHandle from, DGLArrayHandle to); -/*! +/** * @brief Create a new runtime stream. * * @param device_type The device type of context @@ -526,7 +526,7 @@ DGL_DLL int DGLArrayCopyFromTo(DGLArrayHandle from, DGLArrayHandle to); DGL_DLL int DGLStreamCreate( int device_type, int device_id, DGLStreamHandle* out); -/*! +/** * @brief Free a created stream handle. * * @param device_type The device type of context @@ -537,7 +537,7 @@ DGL_DLL int DGLStreamCreate( DGL_DLL int DGLStreamFree( int device_type, int device_id, DGLStreamHandle stream); -/*! +/** * @brief Set the runtime stream of current thread to be stream. * The subsequent calls to the same device_type * will use the setted stream handle. @@ -551,7 +551,7 @@ DGL_DLL int DGLStreamFree( DGL_DLL int DGLSetStream( int device_type, int device_id, DGLStreamHandle handle); -/*! +/** * @brief Get the runtime stream of current thread. * * @param device_type The device type of context @@ -562,7 +562,7 @@ DGL_DLL int DGLSetStream( DGL_DLL int DGLGetStream( int device_type, int device_id, DGLStreamHandle* handle); -/*! +/** * @brief Wait until all computations on stream completes. * * @param device_type The device type of context @@ -573,7 +573,7 @@ DGL_DLL int DGLGetStream( DGL_DLL int DGLSynchronize( int device_type, int device_id, DGLStreamHandle stream); -/*! +/** * @brief Synchronize two streams of execution. 
* * @param device_type The device type of context @@ -585,28 +585,28 @@ DGL_DLL int DGLSynchronize( DGL_DLL int DGLStreamStreamSynchronize( int device_type, int device_id, DGLStreamHandle src, DGLStreamHandle dst); -/*! +/** * @brief Load tensor adapter. * @return 0 when success, -1 when failure happens. */ DGL_DLL int DGLLoadTensorAdapter(const char* path); -/*! +/** * @brief Pin host memory. */ int DGLArrayPinData(DGLArrayHandle handle, DGLContext ctx); -/*! +/** * @brief Unpin host memory. */ int DGLArrayUnpinData(DGLArrayHandle handle, DGLContext ctx); -/*! +/** * @brief Record the stream that's using this tensor. */ int DGLArrayRecordStream(DGLArrayHandle handle, DGLStreamHandle stream); -/*! +/** * @brief Bug report macro. * * This serves as a sanity check on system side to make sure the code is correct diff --git a/include/dgl/runtime/config.h b/include/dgl/runtime/config.h index 7026916a0437..8cdd2a296026 100644 --- a/include/dgl/runtime/config.h +++ b/include/dgl/runtime/config.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file runtime/config.h * @brief DGL runtime config diff --git a/include/dgl/runtime/container.h b/include/dgl/runtime/container.h index 78ffcae38917..e7ab7dbb7a9c 100644 --- a/include/dgl/runtime/container.h +++ b/include/dgl/runtime/container.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file runtime/container.h * @brief Defines the container object data structures. @@ -18,7 +18,7 @@ namespace dgl { namespace runtime { -/*! +/** * @brief value object. * * It is typically used to wrap a non-Object type to Object type. @@ -26,14 +26,14 @@ namespace runtime { */ class ValueObject : public Object { public: - /*! @brief the value data */ + /** @brief the value data */ DGLRetValue data; static constexpr const char* _type_key = "Value"; DGL_DECLARE_OBJECT_TYPE_INFO(ValueObject, Object); }; -/*! @brief Construct a value object. */ +/** @brief Construct a value object. 
*/ template inline std::shared_ptr MakeValue(T&& val) { auto obj = std::make_shared(); @@ -41,7 +41,7 @@ inline std::shared_ptr MakeValue(T&& val) { obj->data = std::forward(val); return obj; } -/*! @brief Vallue reference type */ +/** @brief Value reference type */ class Value : public ObjectRef { public: Value() {} @@ -54,10 +54,10 @@ class Value : public ObjectRef { using ContainerType = ValueObject; }; -/*! @brief list obj content in list */ +/** @brief list obj content in list */ class ListObject : public Object { public: - /*! @brief the data content */ + /** @brief the data content */ std::vector > data; void VisitAttrs(AttrVisitor* visitor) final { @@ -68,7 +68,7 @@ class ListObject : public Object { DGL_DECLARE_OBJECT_TYPE_INFO(ListObject, Object); }; -/*! @brief map obj content */ +/** @brief map obj content */ class MapObject : public Object { public: void VisitAttrs(AttrVisitor* visitor) final { @@ -89,35 +89,35 @@ class MapObject : public Object { } }; - /*! @brief The corresponding conatiner type */ + /** @brief The corresponding container type */ using ContainerType = std::unordered_map< std::shared_ptr, std::shared_ptr, Hash, Equal>; - /*! @brief the data content */ + /** @brief the data content */ ContainerType data; static constexpr const char* _type_key = "Map"; DGL_DECLARE_OBJECT_TYPE_INFO(MapObject, Object); }; -/*! @brief specialized map obj with string as key */ +/** @brief specialized map obj with string as key */ class StrMapObject : public Object { public: void VisitAttrs(AttrVisitor* visitor) final { // Visitor to map have no effect. } - /*! @brief The corresponding conatiner type */ + /** @brief The corresponding container type */ using ContainerType = std::unordered_map >; - /*! @brief the data content */ + /** @brief the data content */ ContainerType data; static constexpr const char* _type_key = "StrMap"; DGL_DECLARE_OBJECT_TYPE_INFO(StrMapObject, Object); }; -/*! +/** * @brief iterator adapter that adapts TIter to return another type.
* @tparam Converter a struct that contains converting function * @tparam TIter the content iterator type. @@ -149,7 +149,7 @@ class IterAdapter { TIter iter_; }; -/*! +/** * @brief List container of ObjectRef. * * List implements copy on write semantics, which means list is mutable @@ -187,29 +187,29 @@ template < typename std::enable_if::value>::type> class List : public ObjectRef { public: - /*! + /** * @brief default constructor */ List() { obj_ = std::make_shared(); } - /*! + /** * @brief move constructor * @param other source */ List(List&& other) { // NOLINT(*) obj_ = std::move(other.obj_); } - /*! + /** * @brief copy constructor * @param other source */ List(const List& other) : ObjectRef(other.obj_) { // NOLINT(*) } - /*! + /** * @brief constructor from pointer * @param n the container pointer */ explicit List(std::shared_ptr n) : ObjectRef(n) {} - /*! + /** * @brief constructor from iterator * @param begin begin of iterator * @param end end of iterator @@ -219,21 +219,21 @@ class List : public ObjectRef { List(IterType begin, IterType end) { assign(begin, end); } - /*! + /** * @brief constructor from initializer list * @param init The initalizer list */ List(std::initializer_list init) { // NOLINT(*) assign(init.begin(), init.end()); } - /*! + /** * @brief constructor from vector * @param init The vector */ List(const std::vector& init) { // NOLINT(*) assign(init.begin(), init.end()); } - /*! + /** * @brief Constructs a container with n elements. Each element is a copy of * val \param n The size of the container \param val The init value */ @@ -244,7 +244,7 @@ class List : public ObjectRef { } obj_ = std::move(tmp_obj); } - /*! + /** * @brief move assign operator * @param other The source of assignment * @return reference to self. @@ -253,7 +253,7 @@ class List : public ObjectRef { obj_ = std::move(other.obj_); return *this; } - /*! + /** * @brief copy assign operator * @param other The source of assignment * @return reference to self. 
@@ -262,7 +262,7 @@ class List : public ObjectRef { obj_ = other.obj_; return *this; } - /*! + /** * @brief reset the list to content from iterator. * @param begin begin of iterator * @param end end of iterator @@ -276,7 +276,7 @@ class List : public ObjectRef { } obj_ = std::move(n); } - /*! + /** * @brief Read i-th element from list. * @param i The index * @return the i-th element. @@ -284,12 +284,12 @@ class List : public ObjectRef { inline const T operator[](size_t i) const { return T(static_cast(obj_.get())->data[i]); } - /*! @return The size of the list */ + /** @return The size of the list */ inline size_t size() const { if (obj_.get() == nullptr) return 0; return static_cast(obj_.get())->data.size(); } - /*! + /** * @brief copy on write semantics * Do nothing if current handle is the unique copy of the list. * Otherwise make a new copy of the list to ensure the current handle @@ -304,7 +304,7 @@ class List : public ObjectRef { } return static_cast(obj_.get()); } - /*! + /** * @brief push a new item to the back of the list * @param item The item to be pushed. */ @@ -312,7 +312,7 @@ class List : public ObjectRef { ListObject* n = this->CopyOnWrite(); n->data.push_back(item.obj_); } - /*! + /** * @brief set i-th element of the list. * @param i The index * @param value The value to be setted. @@ -321,13 +321,13 @@ class List : public ObjectRef { ListObject* n = this->CopyOnWrite(); n->data[i] = value.obj_; } - /*! @return whether list is empty */ + /** @return whether list is empty */ inline bool empty() const { return size() == 0; } - /*! @brief Copy the content to a vector */ + /** @brief Copy the content to a vector */ inline std::vector ToVector() const { return std::vector(begin(), end()); } - /*! @brief specify container obj */ + /** @brief specify container obj */ using ContainerType = ListObject; struct Ptr2ObjectRef { @@ -341,27 +341,27 @@ class List : public ObjectRef { Ptr2ObjectRef, std::vector >::const_reverse_iterator>; - /*! 
@return begin iterator */ + /** @return begin iterator */ inline iterator begin() const { return iterator(static_cast(obj_.get())->data.begin()); } - /*! @return end iterator */ + /** @return end iterator */ inline iterator end() const { return iterator(static_cast(obj_.get())->data.end()); } - /*! @return rbegin iterator */ + /** @return rbegin iterator */ inline reverse_iterator rbegin() const { return reverse_iterator( static_cast(obj_.get())->data.rbegin()); } - /*! @return rend iterator */ + /** @return rend iterator */ inline reverse_iterator rend() const { return reverse_iterator( static_cast(obj_.get())->data.rend()); } }; -/*! +/** * @brief Map container of ObjectRef->ObjectRef. * * Map implements copy on write semantics, which means map is mutable @@ -403,29 +403,29 @@ template < typename std::enable_if::value>::type> class Map : public ObjectRef { public: - /*! + /** * @brief default constructor */ Map() { obj_ = std::make_shared(); } - /*! + /** * @brief move constructor * @param other source */ Map(Map&& other) { // NOLINT(*) obj_ = std::move(other.obj_); } - /*! + /** * @brief copy constructor * @param other source */ Map(const Map& other) : ObjectRef(other.obj_) { // NOLINT(*) } - /*! + /** * @brief constructor from pointer * @param n the container pointer */ explicit Map(std::shared_ptr n) : ObjectRef(n) {} - /*! + /** * @brief constructor from iterator * @param begin begin of iterator * @param end end of iterator @@ -435,14 +435,14 @@ class Map : public ObjectRef { Map(IterType begin, IterType end) { assign(begin, end); } - /*! + /** * @brief constructor from initializer list * @param init The initalizer list */ Map(std::initializer_list > init) { // NOLINT(*) assign(init.begin(), init.end()); } - /*! + /** * @brief constructor from vector * @param init The vector */ @@ -450,7 +450,7 @@ class Map : public ObjectRef { Map(const std::unordered_map& init) { // NOLINT(*) assign(init.begin(), init.end()); } - /*! 
+ /** * @brief move assign operator * @param other The source of assignment * @return reference to self. @@ -459,7 +459,7 @@ class Map : public ObjectRef { obj_ = std::move(other.obj_); return *this; } - /*! + /** * @brief copy assign operator * @param other The source of assignment * @return reference to self. @@ -468,7 +468,7 @@ class Map : public ObjectRef { obj_ = other.obj_; return *this; } - /*! + /** * @brief reset the list to content from iterator. * @param begin begin of iterator * @param end end of iterator @@ -482,7 +482,7 @@ class Map : public ObjectRef { } obj_ = std::move(n); } - /*! + /** * @brief Read element from map. * @param key The key * @return the corresonding element. @@ -490,7 +490,7 @@ class Map : public ObjectRef { inline const V operator[](const K& key) const { return V(static_cast(obj_.get())->data.at(key.obj_)); } - /*! + /** * @brief Read element from map. * @param key The key * @return the corresonding element. @@ -498,17 +498,17 @@ class Map : public ObjectRef { inline const V at(const K& key) const { return V(static_cast(obj_.get())->data.at(key.obj_)); } - /*! @return The size of the list */ + /** @return The size of the list */ inline size_t size() const { if (obj_.get() == nullptr) return 0; return static_cast(obj_.get())->data.size(); } - /*! @return The size of the list */ + /** @return The size of the list */ inline size_t count(const K& key) const { if (obj_.get() == nullptr) return 0; return static_cast(obj_.get())->data.count(key.obj_); } - /*! + /** * @brief copy on write semantics * Do nothing if current handle is the unique copy of the list. * Otherwise make a new copy of the list to ensure the current handle @@ -523,7 +523,7 @@ class Map : public ObjectRef { } return static_cast(obj_.get()); } - /*! + /** * @brief set the Map. * @param key The index key. * @param value The value to be setted. @@ -533,9 +533,9 @@ class Map : public ObjectRef { n->data[key.obj_] = value.obj_; } - /*! 
@return whether list is empty */ + /** @return whether list is empty */ inline bool empty() const { return size() == 0; } - /*! @brief specify container obj */ + /** @brief specify container obj */ using ContainerType = MapObject; struct Ptr2ObjectRef { @@ -549,15 +549,15 @@ class Map : public ObjectRef { using iterator = IterAdapter; - /*! @return begin iterator */ + /** @return begin iterator */ inline iterator begin() const { return iterator(static_cast(obj_.get())->data.begin()); } - /*! @return end iterator */ + /** @return end iterator */ inline iterator end() const { return iterator(static_cast(obj_.get())->data.end()); } - /*! @return begin iterator */ + /** @return begin iterator */ inline iterator find(const K& key) const { return iterator( static_cast(obj_.get())->data.find(key.obj_)); @@ -644,22 +644,22 @@ class Map : public ObjectRef { using iterator = IterAdapter; - /*! @return begin iterator */ + /** @return begin iterator */ inline iterator begin() const { return iterator(static_cast(obj_.get())->data.begin()); } - /*! @return end iterator */ + /** @return end iterator */ inline iterator end() const { return iterator(static_cast(obj_.get())->data.end()); } - /*! @return begin iterator */ + /** @return begin iterator */ inline iterator find(const std::string& key) const { return iterator( static_cast(obj_.get())->data.find(key)); } }; -/*! +/** * @brief Helper function to convert a List object to a vector. * @tparam T element type * @param list Input list object. diff --git a/include/dgl/runtime/device_api.h b/include/dgl/runtime/device_api.h index 16ed5d9600bb..816bc4f31284 100644 --- a/include/dgl/runtime/device_api.h +++ b/include/dgl/runtime/device_api.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2016 by Contributors * @file dgl/runtime/device_api.h * @brief Abstract device memory management API @@ -13,7 +13,7 @@ namespace dgl { namespace runtime { -/*! 
+/** * @brief the query type into GetAttr */ enum DeviceAttrKind : int { @@ -28,34 +28,34 @@ enum DeviceAttrKind : int { kMaxThreadDimensions = 8 }; -/*! @brief Number of bytes each allocation must align to */ +/** @brief Number of bytes each allocation must align to */ constexpr int kAllocAlignment = 64; -/*! @brief Number of bytes each allocation must align to in temporary allocation +/** @brief Number of bytes each allocation must align to in temporary allocation */ constexpr int kTempAllocaAlignment = 64; -/*! @brief Maximum size that can be allocated on stack */ +/** @brief Maximum size that can be allocated on stack */ constexpr int kMaxStackAlloca = 1024; -/*! +/** * @brief DGL Runtime Device API, abstracts the device * specific interface for memory management. */ class DeviceAPI { public: - /*! @brief virtual destructor */ + /** @brief virtual destructor */ virtual ~DeviceAPI() {} - /*! + /** * @brief Check whether the device is available. */ virtual bool IsAvailable() { return true; } - /*! + /** * @brief Set the environment device id to ctx * @param ctx The context to be set. */ virtual void SetDevice(DGLContext ctx) = 0; - /*! + /** * @brief Get attribute of specified device. * @param ctx The device context * @param kind The result kind @@ -64,7 +64,7 @@ class DeviceAPI { */ virtual void GetAttr( DGLContext ctx, DeviceAttrKind kind, DGLRetValue* rv) = 0; - /*! + /** * @brief Allocate a data space on device. * @param ctx The device context to perform operation. * @param nbytes The number of bytes in memory. @@ -76,13 +76,13 @@ class DeviceAPI { virtual void* AllocDataSpace( DGLContext ctx, size_t nbytes, size_t alignment, DGLDataType type_hint) = 0; - /*! + /** * @brief Free a data space on device. * @param ctx The device context to perform operation. * @param ptr The data space. */ virtual void FreeDataSpace(DGLContext ctx, void* ptr) = 0; - /*! + /** * @brief copy data from one place to another * @param from The source array. 
* @param from_offset The byte offeset in the from. @@ -98,14 +98,14 @@ class DeviceAPI { const void* from, size_t from_offset, void* to, size_t to_offset, size_t num_bytes, DGLContext ctx_from, DGLContext ctx_to, DGLDataType type_hint) = 0; - /*! + /** * @brief Create a new stream of execution. * * @param ctx The context of allocation. */ DGL_DLL virtual DGLStreamHandle CreateStream(DGLContext ctx); - /*! + /** * @brief Free a stream of execution * * @param ctx The context of the stream @@ -113,23 +113,23 @@ class DeviceAPI { */ DGL_DLL virtual void FreeStream(DGLContext ctx, DGLStreamHandle stream); - /*! + /** * @brief Synchronize the stream * @param ctx The context to perform operation. * @param stream The stream to be sync. */ virtual void StreamSync(DGLContext ctx, DGLStreamHandle stream) = 0; - /*! + /** * @brief Set the stream * @param ctx The context to set stream. * @param stream The stream to be set. */ virtual void SetStream(DGLContext ctx, DGLStreamHandle stream) {} - /*! + /** * @brief Get the stream */ virtual DGLStreamHandle GetStream() const { return nullptr; } - /*! + /** * @brief Synchronize 2 streams of execution. * * An event is created in event_src stream that the second then @@ -144,7 +144,7 @@ class DeviceAPI { DGL_DLL virtual void SyncStreamFromTo( DGLContext ctx, DGLStreamHandle event_src, DGLStreamHandle event_dst); - /*! + /** * @brief Pin host memory using cudaHostRegister(). * * @param ptr The host memory pointer to be pinned. @@ -152,19 +152,19 @@ class DeviceAPI { */ DGL_DLL virtual void PinData(void* ptr, size_t nbytes); - /*! + /** * @brief Unpin host memory using cudaHostUnregister(). * * @param ptr The host memory pointer to be unpinned. */ DGL_DLL virtual void UnpinData(void* ptr); - /*! + /** * @brief Check whether the memory is in pinned memory. */ DGL_DLL virtual bool IsPinned(const void* ptr) { return false; } - /*! + /** * @brief Allocate temporal workspace for backend execution. 
* * \note We have the following assumption about backend temporal @@ -183,7 +183,7 @@ class DeviceAPI { */ DGL_DLL virtual void* AllocWorkspace( DGLContext ctx, size_t nbytes, DGLDataType type_hint = {}); - /*! + /** * @brief Free temporal workspace in backend execution. * * @param ctx The context of allocation. @@ -191,7 +191,7 @@ class DeviceAPI { */ DGL_DLL virtual void FreeWorkspace(DGLContext ctx, void* ptr); - /*! + /** * @brief Get device API based on context. * @param ctx The context * @param allow_missing Whether allow missing @@ -199,7 +199,7 @@ class DeviceAPI { */ DGL_DLL static DeviceAPI* Get(DGLContext ctx, bool allow_missing = false); - /*! + /** * @brief Get device API based on context. * @param dev_type The device type * @param allow_missing Whether allow missing @@ -209,7 +209,7 @@ class DeviceAPI { DGLDeviceType dev_type, bool allow_missing = false); }; -/*! @brief The device type bigger than this is RPC device */ +/** @brief The device type bigger than this is RPC device */ constexpr int kRPCSessMask = 128; } // namespace runtime } // namespace dgl diff --git a/include/dgl/runtime/dlpack_convert.h b/include/dgl/runtime/dlpack_convert.h index ad9bc53619cd..a3f576379f76 100644 --- a/include/dgl/runtime/dlpack_convert.h +++ b/include/dgl/runtime/dlpack_convert.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2022 by Contributors * @file include/dgl/runtime/dlpack_convert.h * @brief Conversion between NDArray and DLPack. @@ -15,7 +15,7 @@ namespace dgl { namespace runtime { struct DLPackConvert { - /*! + /** * @brief Create a DGL NDArray from a DLPack tensor. * * This allows us to create a NDArray using the memory @@ -28,7 +28,7 @@ struct DLPackConvert { */ static NDArray FromDLPack(DLManagedTensor* tensor); - /*! + /** * @brief Deleter for NDArray converted from DLPack. * * This is used from data which is passed from external @@ -38,7 +38,7 @@ struct DLPackConvert { */ static void DLPackDeleter(NDArray::Container* ptr); - /*! 
@brief Convert a DGL NDArray to a DLPack tensor. + /** @brief Convert a DGL NDArray to a DLPack tensor. * * @param from The DGL NDArray. * @return A DLPack tensor. @@ -53,13 +53,13 @@ struct DLPackConvert { extern "C" { #endif -/*! +/** * @brief Delete (free) a DLManagedTensor's data. * @param dltensor Pointer to the DLManagedTensor. */ DGL_DLL void DGLDLManagedTensorCallDeleter(DLManagedTensor* dltensor); -/*! +/** * @brief Produce an array from the DLManagedTensor that shares data memory * with the DLManagedTensor. * @param from The source DLManagedTensor. @@ -68,7 +68,7 @@ DGL_DLL void DGLDLManagedTensorCallDeleter(DLManagedTensor* dltensor); */ DGL_DLL int DGLArrayFromDLPack(DLManagedTensor* from, DGLArrayHandle* out); -/*! +/** * @brief Produce a DLMangedTensor from the array that shares data memory with * the array. * @param from The source array. diff --git a/include/dgl/runtime/module.h b/include/dgl/runtime/module.h index 9b7e7ac4d4d2..a4558bb1f585 100644 --- a/include/dgl/runtime/module.h +++ b/include/dgl/runtime/module.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2017 by Contributors * @file dgl/runtime/module.h * @brief Runtime container of the functions generated by DGL, @@ -24,7 +24,7 @@ namespace runtime { class ModuleNode; class PackedFunc; -/*! +/** * @brief Module container of DGL. */ class Module { @@ -32,7 +32,7 @@ class Module { Module() {} // constructor from container. explicit Module(std::shared_ptr n) : node_(n) {} - /*! + /** * @brief Get packed function from current module by name. * * @param name The name of the function. @@ -43,12 +43,12 @@ class Module { */ inline PackedFunc GetFunction( const std::string& name, bool query_imports = false); - /*! @return internal container */ + /** @return internal container */ inline ModuleNode* operator->(); - /*! @return internal container */ + /** @return internal container */ inline const ModuleNode* operator->() const; // The following functions requires link with runtime. - /*! 
+ /** * @brief Import another module into this module. * @param other The module to be imported. * @@ -56,7 +56,7 @@ class Module { * An error will be thrown when cyclic dependency is detected. */ DGL_DLL void Import(Module other); - /*! + /** * @brief Load a module from file. * @param file_name The name of the host function module. * @param format The format of the file. @@ -70,17 +70,17 @@ class Module { std::shared_ptr node_; }; -/*! +/** * @brief Base node container of module. * Do not create this directly, instead use Module. */ class ModuleNode { public: - /*! @brief virtual destructor */ + /** @brief virtual destructor */ virtual ~ModuleNode() {} - /*! @return The module type key */ + /** @return The module type key */ virtual const char* type_key() const = 0; - /*! + /** * @brief Get a PackedFunc from module. * * The PackedFunc may not be fully initialized, @@ -100,14 +100,14 @@ class ModuleNode { virtual PackedFunc GetFunction( const std::string& name, const std::shared_ptr& sptr_to_self) = 0; - /*! + /** * @brief Save the module to file. * @param file_name The file to be saved to. * @param format The format of the file. */ virtual void SaveToFile( const std::string& file_name, const std::string& format); - /*! + /** * @brief Save the module to binary stream. * @param stream The binary stream to save to. * @note It is recommended to implement this for device modules, @@ -115,13 +115,13 @@ class ModuleNode { * We can use this to do AOT loading of bundled device functions. */ DGL_DLL virtual void SaveToBinary(dmlc::Stream* stream); - /*! + /** * @brief Get the source code of module, when available. * @param format Format of the source code, can be empty by default. * @return Possible source code when available. */ DGL_DLL virtual std::string GetSource(const std::string& format = ""); - /*! + /** * @brief Get a function from current environment * The environment includes all the imports as well as Global functions. 
* @@ -129,37 +129,37 @@ class ModuleNode { * @return The corresponding function. */ DGL_DLL const PackedFunc* GetFuncFromEnv(const std::string& name); - /*! @return The module it imports from */ + /** @return The module it imports from */ const std::vector& imports() const { return imports_; } protected: friend class Module; - /*! @brief The modules this module depend on */ + /** @brief The modules this module depend on */ std::vector imports_; private: - /*! @brief Cache used by GetImport */ + /** @brief Cache used by GetImport */ std::unordered_map > import_cache_; }; -/*! @brief namespace for constant symbols */ +/** @brief namespace for constant symbols */ namespace symbol { -/*! @brief Global variable to store module context. */ +/** @brief Global variable to store module context. */ constexpr const char* dgl_module_ctx = "__dgl_module_ctx"; -/*! @brief Global variable to store device module blob */ +/** @brief Global variable to store device module blob */ constexpr const char* dgl_dev_mblob = "__dgl_dev_mblob"; -/*! @brief Number of bytes of device module blob. */ +/** @brief Number of bytes of device module blob. */ constexpr const char* dgl_dev_mblob_nbytes = "__dgl_dev_mblob_nbytes"; -/*! @brief global function to set device */ +/** @brief global function to set device */ constexpr const char* dgl_set_device = "__dgl_set_device"; -/*! @brief Auxiliary counter to global barrier. */ +/** @brief Auxiliary counter to global barrier. */ constexpr const char* dgl_global_barrier_state = "__dgl_global_barrier_state"; -/*! +/** * @brief Prepare the global barrier before kernels that uses global barrier. */ constexpr const char* dgl_prepare_global_barrier = "__dgl_prepare_global_barrier"; -/*! @brief Placeholder for the module's entry function. */ +/** @brief Placeholder for the module's entry function. 
*/ constexpr const char* dgl_module_main = "__dgl_main__"; } // namespace symbol diff --git a/include/dgl/runtime/ndarray.h b/include/dgl/runtime/ndarray.h index 4a8c6ec6b8d0..ec45050ad5d5 100644 --- a/include/dgl/runtime/ndarray.h +++ b/include/dgl/runtime/ndarray.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2017-2022 by Contributors * @file dgl/runtime/ndarray.h * @brief Abstract device memory management API @@ -33,7 +33,7 @@ inline std::ostream& operator << (std::ostream& os, DGLDataType t); namespace dgl { -/*! +/** * @brief Type traits that converts a C type to a DGLDataType. * * Usage: @@ -69,12 +69,12 @@ GEN_DGLDATATYPETRAITS_FOR(double, kDGLFloat, 64); namespace runtime { -/*! +/** * @brief DLPack converter. */ struct DLPackConvert; -/*! +/** * @brief Managed NDArray. * The array is backed by reference counted blocks. */ @@ -82,19 +82,19 @@ class NDArray { public: // internal container type struct Container; - /*! @brief default constructor */ + /** @brief default constructor */ NDArray() {} - /*! + /** * @brief cosntruct a NDArray that refers to data * @param data The data this NDArray refers to */ explicit inline NDArray(Container* data); - /*! + /** * @brief copy constructor * @param other The value to be copied */ inline NDArray(const NDArray& other); // NOLINT(*) - /*! + /** * @brief move constructor * @param other The value to be moved */ @@ -102,18 +102,18 @@ class NDArray { : data_(other.data_) { other.data_ = nullptr; } - /*! @brief destructor */ + /** @brief destructor */ ~NDArray() { this->reset(); } - /*! + /** * @brief Swap this array with another NDArray * @param other The other NDArray */ void swap(NDArray& other) { // NOLINT(*) std::swap(data_, other.data_); } - /*! + /** * @brief copy assignmemt * @param other The value to be assigned. * @return reference to self. @@ -123,7 +123,7 @@ class NDArray { NDArray(other).swap(*this); // NOLINT(*) return *this; } - /*! + /** * @brief move assignmemt * @param other The value to be assigned. 
* @return reference to self. @@ -133,26 +133,26 @@ class NDArray { NDArray(std::move(other)).swap(*this); // NOLINT(*) return *this; } - /*! @return If NDArray is defined */ + /** @return If NDArray is defined */ bool defined() const { return data_ != nullptr; } - /*! @return If both NDArray reference the same container */ + /** @return If both NDArray reference the same container */ bool same_as(const NDArray& other) const { return data_ == other.data_; } - /*! @brief reset the content of NDArray to be nullptr */ + /** @brief reset the content of NDArray to be nullptr */ inline void reset(); - /*! + /** * @return the reference counter * @note this number is approximate in multi-threaded setting. */ inline int use_count() const; - /*! @return Pointer to content of DGLArray */ + /** @return Pointer to content of DGLArray */ inline const DGLArray* operator->() const; - /*! @return True if the ndarray is contiguous. */ + /** @return True if the ndarray is contiguous. */ bool IsContiguous() const; - /*! @return the data pointer with type. */ + /** @return the data pointer with type. */ template inline T* Ptr() const { if (!defined()) @@ -160,7 +160,7 @@ class NDArray { else return static_cast(operator->()->data); } - /*! + /** * @brief Copy data content from/into another array. * @param other The source array to be copied from. * @note The copy runs on the dgl internal stream if it involves a GPU context. @@ -170,17 +170,17 @@ class NDArray { inline void CopyTo(DGLArray *other) const; inline void CopyTo(const NDArray &other) const; - /*! + /** * @brief Copy the data to another context. * @param ctx The target context. * @return The array under another context. */ inline NDArray CopyTo(const DGLContext &ctx) const; - /*! + /** * @brief Return a new array with a copy of the content. */ inline NDArray Clone() const; - /*! + /** * @brief In-place method to pin the current array by calling PinContainer * on the underlying NDArray:Container. 
* @note This is an in-place method. Behavior depends on the current context, @@ -189,7 +189,7 @@ class NDArray { * kDGLCUDA: invalid, will throw an error. */ inline void PinMemory_(); - /*! + /** * @brief In-place method to unpin the current array by calling UnpinContainer * on the underlying NDArray:Container. * @note This is an in-place method. Behavior depends on the current context, @@ -197,27 +197,27 @@ class NDArray { * others: directly return. */ inline void UnpinMemory_(); - /*! + /** * @brief Check if the array is pinned. */ inline bool IsPinned() const; - /*! + /** * @brief Record streams that are using the underlying tensor. * @param stream The stream that is using the underlying tensor. */ inline void RecordStream(DGLStreamHandle stream) const; - /*! + /** * @brief Load NDArray from stream * @param stream The input data stream * @return Whether load is successful */ bool Load(dmlc::Stream* stream); - /*! + /** * @brief Save NDArray to stream * @param stream The output data stream */ void Save(dmlc::Stream* stream) const; - /*! + /** * @brief Create a NDArray that shares the data memory with the current one. * @param shape The shape of the new array. * @param dtype The data type of the new array. @@ -226,7 +226,7 @@ class NDArray { */ DGL_DLL NDArray CreateView( std::vector shape, DGLDataType dtype, int64_t offset = 0); - /*! + /** * @brief Create an empty NDArray. * @param shape The shape of the new array. * @param dtype The data type of the new array. @@ -236,7 +236,7 @@ class NDArray { DGL_DLL static NDArray Empty(std::vector shape, DGLDataType dtype, DGLContext ctx); - /*! + /** * @brief Create an empty NDArray with shared memory. * @param name The name of shared memory. * @param shape The shape of the new array. @@ -250,17 +250,17 @@ class NDArray { DGLDataType dtype, DGLContext ctx, bool is_create); - /*! + /** * @brief Get the size of the array in the number of bytes. */ size_t GetSize() const; - /*! 
+ /** * @brief Get the number of elements in this array. */ int64_t NumElements() const; - /*! + /** * @brief Create a NDArray by copying from std::vector. * @tparam T Type of vector data. Determines the dtype of returned array. */ @@ -268,13 +268,13 @@ class NDArray { DGL_DLL static NDArray FromVector( const std::vector& vec, DGLContext ctx = DGLContext{kDGLCPU, 0}); - /*! + /** * @brief Create a NDArray from a raw pointer. */ DGL_DLL static NDArray CreateFromRaw(const std::vector& shape, DGLDataType dtype, DGLContext ctx, void* raw, bool auto_free); - /*! + /** * @brief Create a std::vector from a 1D NDArray. * @tparam T Type of vector data. * @note Type casting is NOT performed. The caller has to make sure that the vector @@ -285,7 +285,7 @@ class NDArray { std::shared_ptr GetSharedMem() const; - /*! + /** * @brief Function to copy data from one array to another. * @param from The source array. * @param to The target array. @@ -296,7 +296,7 @@ class NDArray { DGL_DLL static void CopyFromTo( DGLArray* from, DGLArray* to, DGLStreamHandle stream); - /*! + /** * @brief Function to pin the DGLArray of a Container. * @param ptr The container to be pinned. * @note Data of the given array will be pinned inplace. @@ -307,7 +307,7 @@ class NDArray { */ DGL_DLL static void PinContainer(Container* ptr); - /*! + /** * @brief Function to unpin the DGLArray of a Container. * @param ptr The container to be unpinned. * @note Data of the given array will be unpinned inplace. @@ -317,14 +317,14 @@ class NDArray { */ DGL_DLL static void UnpinContainer(Container* ptr); - /*! + /** * @brief Function check if the DGLArray of a Container is pinned. * @param ptr The container to be checked. * @return true if pinned. */ DGL_DLL static bool IsContainerPinned(Container* ptr); - /*! + /** * @brief Record streams that are using this tensor. * @param ptr Pointer of the tensor to be recorded. * @param stream The stream that is using this tensor. 
@@ -344,7 +344,7 @@ class NDArray { }; private: - /*! @brief Internal Data content */ + /** @brief Internal Data content */ Container* data_{nullptr}; // enable internal functions friend struct Internal; @@ -353,7 +353,7 @@ class NDArray { friend class DGLArgsSetter; }; -/*! +/** * @brief Save a DGLArray to stream * @param strm The outpu stream * @param tensor The tensor to be saved. @@ -361,7 +361,7 @@ class NDArray { inline bool SaveDGLArray(dmlc::Stream* strm, const DGLArray* tensor); -/*! +/** * @brief Reference counted Container object used to back NDArray. * * This object is DGLArray compatible: @@ -372,25 +372,25 @@ inline bool SaveDGLArray(dmlc::Stream* strm, const DGLArray* tensor); */ struct NDArray::Container { public: - /*! NOTE: the first part of this structure is the same as + /** NOTE: the first part of this structure is the same as * DLManagedTensor, note that, however, the deleter * is only called when the reference counter goes to 0 */ - /*! + /** * @brief Tensor structure. * @note it is important that the first field is DGLArray * So that this data structure is DGLArray compatible. * The head ptr of this struct can be viewed as DGLArray*. */ DGLArray dl_tensor; - /*! + /** * @brief addtional context, reserved for recycling * @note We can attach additional content here * which the current container depend on * (e.g. reference to original memory when creating views). */ void* manager_ctx{nullptr}; - /*! + /** * @brief Customized deleter * * @note The customized deleter is helpful to enable @@ -398,7 +398,7 @@ struct NDArray::Container { * currently defined by the system. */ void (*deleter)(Container* self) = nullptr; - /*! @brief default constructor */ + /** @brief default constructor */ Container() { dl_tensor.data = nullptr; dl_tensor.ndim = 0; @@ -406,13 +406,13 @@ struct NDArray::Container { dl_tensor.strides = nullptr; dl_tensor.byte_offset = 0; } - /*! 
@brief pointer to shared memory */ + /** @brief pointer to shared memory */ std::shared_ptr mem; - /*! @brief developer function, increases reference counter */ + /** @brief developer function, increases reference counter */ void IncRef() { ref_counter_.fetch_add(1, std::memory_order_relaxed); } - /*! @brief developer function, decrease reference counter */ + /** @brief developer function, decrease reference counter */ void DecRef() { if (ref_counter_.fetch_sub(1, std::memory_order_release) == 1) { std::atomic_thread_fence(std::memory_order_acquire); @@ -426,17 +426,17 @@ struct NDArray::Container { friend struct DLPackConvert; friend class NDArray; friend class RPCWrappedFunc; - /*! + /** * @brief The shape container, * can be used for shape data. */ std::vector shape_; - /*! + /** * @brief The stride container, * can be used for stride data. */ std::vector stride_; - /*! @brief The internal array object */ + /** @brief The internal array object */ std::atomic ref_counter_{0}; bool pinned_by_dgl_{false}; @@ -527,7 +527,7 @@ inline const DGLArray* NDArray::operator->() const { return &(data_->dl_tensor); } -/*! @brief Magic number for NDArray file */ +/** @brief Magic number for NDArray file */ constexpr uint64_t kDGLNDArrayMagic = 0xDD5E40F096B4A13F; inline bool SaveDGLArray(dmlc::Stream* strm, @@ -579,7 +579,7 @@ inline bool SaveDGLArray(dmlc::Stream* strm, return true; } -/*! +/** * @brief Convert type code to its name * @param type_code The type code . * @return The name of type code. @@ -605,7 +605,7 @@ inline const char* TypeCode2Str(int type_code) { } } -/*! +/** * @brief Convert device type code to its name * @param device_type The device type code. * @return The name of the device. @@ -619,7 +619,7 @@ inline const char* DeviceTypeCode2Str(DGLDeviceType device_type) { } } -/*! +/** * @brief convert a string to DGL type. * @param s The string to be converted. * @return The corresponding dgl type. 
@@ -651,7 +651,7 @@ inline DGLDataType String2DGLDataType(std::string s) { return t; } -/*! +/** * @brief convert a DGL type to string. * @param t The type to be converted. * @return The corresponding dgl type in string. @@ -737,12 +737,12 @@ std::ostream& operator << (std::ostream& os, dgl::runtime::NDArray array); ///////////////// Operator overloading for DGLDataType ///////////////// -/*! @brief Check whether two data types are the same.*/ +/** @brief Check whether two data types are the same.*/ inline bool operator == (const DGLDataType& ty1, const DGLDataType& ty2) { return ty1.code == ty2.code && ty1.bits == ty2.bits && ty1.lanes == ty2.lanes; } -/*! @brief Check whether two data types are different.*/ +/** @brief Check whether two data types are different.*/ inline bool operator != (const DGLDataType& ty1, const DGLDataType& ty2) { return !(ty1 == ty2); } @@ -761,12 +761,12 @@ inline std::ostream& operator << (std::ostream& os, DGLDataType t) { ///////////////// Operator overloading for DGLContext ///////////////// -/*! @brief Check whether two device contexts are the same.*/ +/** @brief Check whether two device contexts are the same.*/ inline bool operator == (const DGLContext& ctx1, const DGLContext& ctx2) { return ctx1.device_type == ctx2.device_type && ctx1.device_id == ctx2.device_id; } -/*! @brief Check whether two device contexts are different.*/ +/** @brief Check whether two device contexts are different.*/ inline bool operator != (const DGLContext& ctx1, const DGLContext& ctx2) { return !(ctx1 == ctx2); } diff --git a/include/dgl/runtime/object.h b/include/dgl/runtime/object.h index 9b724ecfdf3d..785e040cdd30 100644 --- a/include/dgl/runtime/object.h +++ b/include/dgl/runtime/object.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file runtime/object.h * @brief Defines the Object data structures. @@ -21,7 +21,7 @@ class Object; class ObjectRef; class NDArray; -/*! +/** * @brief Visitor class to each object attribute. 
* The content is going to be called for each field. */ @@ -48,26 +48,26 @@ class AttrVisitor { //! \endcond }; -/*! +/** * @brief base class of object container. * All object's internal is stored as std::shared_ptr */ class Object { public: - /*! @brief virtual destructor */ + /** @brief virtual destructor */ virtual ~Object() {} - /*! @return The unique type key of the object */ + /** @return The unique type key of the object */ virtual const char* type_key() const = 0; - /*! + /** * @brief Apply visitor to each field of the Object * Visitor could mutate the content of the object. * override if Object contains attribute fields. * @param visitor The visitor */ virtual void VisitAttrs(AttrVisitor* visitor) {} - /*! @return the type index of the object */ + /** @return the type index of the object */ virtual uint32_t type_index() const = 0; - /*! + /** * @brief Whether this object derives from object with type_index=tid. * Implemented by DGL_DECLARE_OBJECT_TYPE_INFO * @@ -75,24 +75,24 @@ class Object { * @return the check result. */ virtual bool _DerivedFrom(uint32_t tid) const; - /*! + /** * @brief get a runtime unique type index given a type key * @param type_key Type key of a type. * @return the corresponding type index. */ static uint32_t TypeKey2Index(const char* type_key); - /*! + /** * @brief get type key from type index. * @param index The type index * @return the corresponding type key. */ static const char* TypeIndex2Key(uint32_t index); - /*! + /** * @return whether the type is derived from */ template inline bool derived_from() const; - /*! + /** * @return whether the object is of type T * @tparam The type to be checked. */ @@ -103,12 +103,12 @@ class Object { static constexpr const char* _type_key = "Object"; }; -/*! @brief base class of all reference object */ +/** @brief base class of all reference object */ class ObjectRef { public: - /*! 
@brief type indicate the container type */ + /** @brief type indicate the container type */ using ContainerType = Object; - /*! + /** * @brief Comparator * * Compare with the two are referencing to the same object (compare by @@ -119,7 +119,7 @@ class ObjectRef { * @sa same_as */ inline bool operator==(const ObjectRef& other) const; - /*! + /** * @brief Comparator * * Compare with the two are referencing to the same object (compare by @@ -129,7 +129,7 @@ class ObjectRef { * @return the compare result. */ inline bool same_as(const ObjectRef& other) const; - /*! + /** * @brief Comparator * * The operator overload allows ObjectRef be used in std::map. @@ -138,24 +138,24 @@ class ObjectRef { * @return the compare result. */ inline bool operator<(const ObjectRef& other) const; - /*! + /** * @brief Comparator * @param other Another object ref. * @return the compare result. * @sa same_as */ inline bool operator!=(const ObjectRef& other) const; - /*! @return the hash function for ObjectRef */ + /** @return the hash function for ObjectRef */ inline size_t hash() const; - /*! @return whether the expression is null */ + /** @return whether the expression is null */ inline bool defined() const; - /*! @return the internal type index of Object */ + /** @return the internal type index of Object */ inline uint32_t type_index() const; - /*! @return the internal object pointer */ + /** @return the internal object pointer */ inline const Object* get() const; - /*! @return the internal object pointer */ + /** @return the internal object pointer */ inline const Object* operator->() const; - /*! + /** * @brief Downcast this object to its actual type. * This returns nullptr if the object is not of the requested type. * Example usage: @@ -168,15 +168,15 @@ class ObjectRef { template inline const T* as() const; - /*! @brief default constructor */ + /** @brief default constructor */ ObjectRef() = default; explicit ObjectRef(std::shared_ptr obj) : obj_(obj) {} - /*! 
@brief the internal object, do not touch */ + /** @brief the internal object, do not touch */ std::shared_ptr obj_; }; -/*! +/** * @brief helper macro to declare type information in a base object. * * This is macro should be used in abstract base class definition @@ -189,7 +189,7 @@ class ObjectRef { return Parent::_DerivedFrom(tid); \ } -/*! +/** * @brief helper macro to declare type information in a terminal class * * This is macro should be used in terminal class definition. @@ -222,7 +222,7 @@ class ObjectRef { return Parent::_DerivedFrom(tid); \ } -/*! @brief Macro to generate common object reference class method definition */ +/** @brief Macro to generate common object reference class method definition */ #define DGL_DEFINE_OBJECT_REF_METHODS(TypeName, BaseTypeName, ObjectName) \ TypeName() {} \ explicit TypeName(std::shared_ptr obj) \ @@ -237,7 +237,7 @@ class ObjectRef { operator bool() const { return this->defined(); } \ using ContainerType = ObjectName -/*! @brief Macro to generate object reference class definition */ +/** @brief Macro to generate object reference class definition */ #define DGL_DEFINE_OBJECT_REF(TypeName, ObjectName) \ class TypeName : public ::dgl::runtime::ObjectRef { \ public: \ @@ -300,12 +300,12 @@ inline const T* ObjectRef::as() const { return nullptr; } -/*! @brief The hash function for nodes */ +/** @brief The hash function for nodes */ struct ObjectHash { size_t operator()(const ObjectRef& a) const { return a.hash(); } }; -/*! @brief The equal comparator for nodes */ +/** @brief The equal comparator for nodes */ struct ObjectEqual { bool operator()(const ObjectRef& a, const ObjectRef& b) const { return a.get() == b.get(); diff --git a/include/dgl/runtime/packed_func.h b/include/dgl/runtime/packed_func.h index 35ce34a98eab..1e8c91d127d4 100644 --- a/include/dgl/runtime/packed_func.h +++ b/include/dgl/runtime/packed_func.h @@ -1,4 +1,4 @@ -/*! 
+/** * Copyright (c) 2017 by Contributors * @file dgl/runtime/packed_func.h * @brief Type-erased function used across DGL API. @@ -41,7 +41,7 @@ class DGLArgValue; class DGLRetValue; class DGLArgsSetter; -/*! +/** * @brief Packed function is a type-erased function. * The arguments are passed by packed format. * @@ -51,7 +51,7 @@ class DGLArgsSetter; */ class PackedFunc { public: - /*! + /** * @brief The internal std::function * @param args The arguments to the function. * @param rv The return value. @@ -70,14 +70,14 @@ class PackedFunc { * @endcode */ using FType = std::function; - /*! @brief default constructor */ + /** @brief default constructor */ PackedFunc() {} - /*! + /** * @brief constructing a packed function from a std::function. * @param body the internal container of packed function. */ explicit PackedFunc(FType body) : body_(body) {} - /*! + /** * @brief Call packed function by directly passing in unpacked format. * @param args Arguments to be passed. * @tparam Args arguments to be passed. @@ -93,32 +93,32 @@ class PackedFunc { */ template inline DGLRetValue operator()(Args&&... args) const; - /*! + /** * @brief Call the function in packed format. * @param args The arguments * @param rv The return value. */ inline void CallPacked(DGLArgs args, DGLRetValue* rv) const; - /*! @return the internal body function */ + /** @return the internal body function */ inline FType body() const; - /*! @return Whether the packed function is nullptr */ + /** @return Whether the packed function is nullptr */ bool operator==(std::nullptr_t null) const { return body_ == nullptr; } - /*! @return Whether the packed function is not nullptr */ + /** @return Whether the packed function is not nullptr */ bool operator!=(std::nullptr_t null) const { return body_ != nullptr; } private: - /*! @brief internal container of packed function */ + /** @brief internal container of packed function */ FType body_; }; -/*! 
+/** * @brief Please refer to \ref TypedPackedFuncAnchor * "TypedPackedFunc" */ template class TypedPackedFunc; -/*! +/** * @anchor TypedPackedFuncAnchor * @brief A PackedFunc wrapper to provide typed function signature. * It is backed by a PackedFunc internally. @@ -153,11 +153,11 @@ class TypedPackedFunc; template class TypedPackedFunc { public: - /*! @brief short hand for this function type */ + /** @brief short hand for this function type */ using TSelf = TypedPackedFunc; - /*! @brief default constructor */ + /** @brief default constructor */ TypedPackedFunc() {} - /*! + /** * @brief construct by wrap a PackedFunc * * Example usage: @@ -175,7 +175,7 @@ class TypedPackedFunc { * @param packed The packed function */ inline explicit TypedPackedFunc(PackedFunc packed); - /*! + /** * @brief construct from a lambda function with the same signature. * * Example usage: @@ -196,7 +196,7 @@ class TypedPackedFunc { explicit TypedPackedFunc(const FLambda& typed_lambda) { this->AssignTypedLambda(typed_lambda); } - /*! + /** * @brief copy assignment operator from typed lambda * * Example usage: @@ -220,7 +220,7 @@ class TypedPackedFunc { this->AssignTypedLambda(typed_lambda); return *this; } - /*! + /** * @brief copy assignment operator from PackedFunc. * @param packed The packed function. * @returns reference to self. @@ -229,27 +229,27 @@ class TypedPackedFunc { packed_ = packed; return *this; } - /*! + /** * @brief Invoke the operator. * @param args The arguments * @returns The return value. */ inline R operator()(Args... args) const; - /*! + /** * @brief convert to PackedFunc * @return the internal PackedFunc */ operator PackedFunc() const { return packed(); } - /*! + /** * @return reference the internal PackedFunc */ const PackedFunc& packed() const { return packed_; } private: friend class DGLRetValue; - /*! @brief The internal packed function */ + /** @brief The internal packed function */ PackedFunc packed_; - /*! 
+ /** * @brief Assign the packed field using a typed lambda function. * * @param flambda The lambda function. @@ -260,13 +260,13 @@ class TypedPackedFunc { inline void AssignTypedLambda(FLambda flambda); }; -/*! @brief Arguments into DGL functions. */ +/** @brief Arguments into DGL functions. */ class DGLArgs { public: const DGLValue* values; const int* type_codes; int num_args; - /*! + /** * @brief constructor * @param values The argument values * @param type_codes The argument type codes @@ -274,9 +274,9 @@ class DGLArgs { */ DGLArgs(const DGLValue* values, const int* type_codes, int num_args) : values(values), type_codes(type_codes), num_args(num_args) {} - /*! @return size of the arguments */ + /** @return size of the arguments */ inline int size() const; - /*! + /** * @brief Get i-th argument * @param i the index. * @return the ith argument. @@ -284,7 +284,7 @@ class DGLArgs { inline DGLArgValue operator[](int i) const; }; -/*! +/** * @brief Type traits to mark if a class is dgl extension type. * * To enable extension type in C++ must be register () ed via marco. @@ -300,23 +300,23 @@ struct extension_class_info { static const int code = 0; }; -/*! +/** * @brief Runtime function table about extension type. */ class ExtTypeVTable { public: - /*! @brief function to be called to delete a handle */ + /** @brief function to be called to delete a handle */ void (*destroy)(void* handle); - /*! @brief function to be called when clone a handle */ + /** @brief function to be called when clone a handle */ void* (*clone)(void* handle); - /*! + /** * @brief Register type * @tparam T The type to be register. * @return The registered vtable. */ template static inline ExtTypeVTable* Register_(); - /*! + /** * @brief Get a vtable based on type code. * @param type_code The type code * @return The registered vtable. @@ -329,7 +329,7 @@ class ExtTypeVTable { int type_code, const ExtTypeVTable& vt); }; -/*! +/** * @brief Internal base class to * handle conversion to POD values. 
*/ @@ -393,7 +393,7 @@ class DGLPODValue_ { return static_cast(value_.v_handle)[0]; } int type_code() const { return type_code_; } - /*! + /** * @brief return handle as specific pointer type. * @tparam T the data type. * @return The pointer type. @@ -410,13 +410,13 @@ class DGLPODValue_ { DGLPODValue_(DGLValue value, int type_code) : value_(value), type_code_(type_code) {} - /*! @brief The value */ + /** @brief The value */ DGLValue value_; - /*! @brief the type code */ + /** @brief the type code */ int type_code_; }; -/*! +/** * @brief A single argument value to PackedFunc. * Containing both type_code and DGLValue * @@ -424,9 +424,9 @@ class DGLPODValue_ { */ class DGLArgValue : public DGLPODValue_ { public: - /*! @brief default constructor */ + /** @brief default constructor */ DGLArgValue() {} - /*! + /** * @brief constructor * @param value of the function * @param type_code The type code. @@ -497,7 +497,7 @@ class DGLArgValue : public DGLPODValue_ { inline std::shared_ptr& obj_sptr(); }; -/*! +/** * @brief Return Value container, * Unlike DGLArgValue, which only holds reference and do not delete * the underlying container during destruction. @@ -507,9 +507,9 @@ class DGLArgValue : public DGLPODValue_ { */ class DGLRetValue : public DGLPODValue_ { public: - /*! @brief default constructor */ + /** @brief default constructor */ DGLRetValue() {} - /*! + /** * @brief move constructor from anoter return value. * @param other The other return value. */ @@ -518,7 +518,7 @@ class DGLRetValue : public DGLPODValue_ { other.value_.v_handle = nullptr; other.type_code_ = kNull; } - /*! @brief destructor */ + /** @brief destructor */ ~DGLRetValue() { this->Clear(); } // reuse converter from parent using DGLPODValue_::operator double; @@ -652,7 +652,7 @@ class DGLRetValue : public DGLPODValue_ { this->SwitchToClass(extension_class_info::code, other); return *this; } - /*! + /** * @brief Move the value back to front-end via C API. * This marks the current container as null. 
* The managed resources is moved to front-end and @@ -668,7 +668,7 @@ class DGLRetValue : public DGLPODValue_ { *ret_type_code = type_code_; type_code_ = kNull; } - /*! @return The value field, if the data is POD */ + /** @return The value field, if the data is POD */ const DGLValue& value() const { CHECK( type_code_ != kObjectHandle && type_code_ != kFuncHandle && @@ -920,9 +920,9 @@ class DGLArgsSetter { inline void operator()(size_t i, const ObjectRef& other) const; // NOLINT(*) private: - /*! @brief The values fields */ + /** @brief The values fields */ DGLValue* values_; - /*! @brief The type code fields */ + /** @brief The type code fields */ int* type_codes_; }; diff --git a/include/dgl/runtime/parallel_for.h b/include/dgl/runtime/parallel_for.h index 118a71268ca1..2920d0acde36 100644 --- a/include/dgl/runtime/parallel_for.h +++ b/include/dgl/runtime/parallel_for.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2021 by Contributors * @file runtime/container.h * @brief Defines the container object data structures. @@ -56,7 +56,7 @@ inline size_t compute_num_threads(size_t begin, size_t end, size_t grain_size) { static DefaultGrainSizeT default_grain_size; -/*! +/** * @brief OpenMP-based parallel for loop. * * It requires each thread's workload to have at least \a grain_size elements. @@ -101,7 +101,7 @@ void parallel_for( #endif } -/*! +/** * @brief OpenMP-based parallel for loop with default grain size. * * parallel_for with grain size to default value, either 1 or controlled through @@ -117,7 +117,7 @@ void parallel_for( parallel_for(begin, end, default_grain_size(), std::forward(f)); } -/*! +/** * @brief OpenMP-based two-stage parallel reduction. * * The first-stage reduction function \a f works in parallel. Each thread's workload has diff --git a/include/dgl/runtime/registry.h b/include/dgl/runtime/registry.h index ee001a3df981..8f98a7fa30d7 100644 --- a/include/dgl/runtime/registry.h +++ b/include/dgl/runtime/registry.h @@ -1,4 +1,4 @@ -/*! 
+/** * Copyright (c) 2017 by Contributors * @file dgl/runtime/registry.h * @brief This file defines the DGL global function registry. @@ -33,22 +33,22 @@ namespace dgl { namespace runtime { -/*! @brief Registry for global function */ +/** @brief Registry for global function */ class Registry { public: - /*! + /** * @brief set the body of the function to be f * @param f The body of the function. */ DGL_DLL Registry& set_body(PackedFunc f); // NOLINT(*) - /*! + /** * @brief set the body of the function to be f * @param f The body of the function. */ Registry& set_body(PackedFunc::FType f) { // NOLINT(*) return set_body(PackedFunc(f)); } - /*! + /** * @brief set the body of the function to be TypedPackedFunc. * * @code @@ -66,7 +66,7 @@ class Registry { Registry& set_body_typed(FLambda f) { return set_body(TypedPackedFunc(f).packed()); } - /*! + /** * @brief Register a function with given name * @param name The name of the function. * @param override Whether allow oveeride existing function. @@ -74,20 +74,20 @@ class Registry { */ DGL_DLL static Registry& Register( const std::string& name, bool override = false); // NOLINT(*) - /*! + /** * @brief Erase global function from registry, if exist. * @param name The name of the function. * @return Whether function exist. */ DGL_DLL static bool Remove(const std::string& name); - /*! + /** * @brief Get the global function by name. * @param name The name of the function. * @return pointer to the registered function, * nullptr if it does not exist. */ DGL_DLL static const PackedFunc* Get(const std::string& name); // NOLINT(*) - /*! + /** * @brief Get the names of currently registered global function. * @return The names */ @@ -97,14 +97,14 @@ class Registry { struct Manager; protected: - /*! @brief name of the function */ + /** @brief name of the function */ std::string name_; - /*! @brief internal packed function */ + /** @brief internal packed function */ PackedFunc func_; friend struct Manager; }; -/*! 
@brief helper macro to supress unused warning */ +/** @brief helper macro to supress unused warning */ #if defined(__GNUC__) #define DGL_ATTRIBUTE_UNUSED __attribute__((unused)) #else @@ -120,7 +120,7 @@ class Registry { #define DGL_TYPE_REG_VAR_DEF \ static DGL_ATTRIBUTE_UNUSED ::dgl::runtime::ExtTypeVTable* __mk_##DGLT -/*! +/** * @brief Register a function globally. * @code * DGL_REGISTER_GLOBAL("MyPrint") @@ -132,7 +132,7 @@ class Registry { DGL_STR_CONCAT(DGL_FUNC_REG_VAR_DEF, __COUNTER__) = \ ::dgl::runtime::Registry::Register(OpName) -/*! +/** * @brief Macro to register extension type. * This must be registered in a cc file * after the trait extension_class_info is defined. diff --git a/include/dgl/runtime/serializer.h b/include/dgl/runtime/serializer.h index eaa5a382b6b6..5c155b7fa5ad 100644 --- a/include/dgl/runtime/serializer.h +++ b/include/dgl/runtime/serializer.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2017 by Contributors * @file dgl/runtime/serializer.h * @brief Serializer extension to support DGL data types diff --git a/include/dgl/runtime/shared_mem.h b/include/dgl/runtime/shared_mem.h index a8b4f6686e19..5f23c148f077 100644 --- a/include/dgl/runtime/shared_mem.h +++ b/include/dgl/runtime/shared_mem.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2017 by Contributors * @file dgl/runtime/ndarray.h * @brief shared memory management. @@ -14,7 +14,7 @@ namespace dgl { namespace runtime { -/* +/** * @brief This class owns shared memory. * * When the object is gone, the shared memory will also be destroyed. @@ -22,7 +22,7 @@ namespace runtime { * the shared memory is removed. */ class SharedMemory { - /* + /** * @brief whether the shared memory is owned by the object. * * If shared memory is created in the object, it'll be owned by the object @@ -41,7 +41,7 @@ class SharedMemory { /* @brief the size of the shared memory. */ size_t size_; - /* + /** * @brief the name of the object. * * In Unix, shared memory is identified by a file. 
Thus, `name` is actually @@ -54,31 +54,31 @@ class SharedMemory { */ std::string GetName() const { return name; } - /* + /** * @brief constructor of the shared memory. * @param name The file corresponding to the shared memory. */ explicit SharedMemory(const std::string &name); - /* + /** * @brief destructor of the shared memory. * It deallocates the shared memory and removes the corresponding file. */ ~SharedMemory(); - /* + /** * @brief create shared memory. * It creates the file and shared memory. * @param sz the size of the shared memory. * @return the address of the shared memory */ void *CreateNew(size_t sz); - /* + /** * @brief allocate shared memory that has been created. * @param sz the size of the shared memory. * @return the address of the shared memory */ void *Open(size_t sz); - /* + /** * @brief check if the shared memory exist. * @param name the name of the shared memory. * @return a boolean value to indicate if the shared memory exists. diff --git a/include/dgl/runtime/smart_ptr_serializer.h b/include/dgl/runtime/smart_ptr_serializer.h index 4c36588919bc..f46f337992f3 100644 --- a/include/dgl/runtime/smart_ptr_serializer.h +++ b/include/dgl/runtime/smart_ptr_serializer.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2017 by Contributors * @file dgl/runtime/serializer.h * @brief Serializer extension to support DGL data types diff --git a/include/dgl/runtime/tensordispatch.h b/include/dgl/runtime/tensordispatch.h index f74dd1910641..fb3851cdaebd 100644 --- a/include/dgl/runtime/tensordispatch.h +++ b/include/dgl/runtime/tensordispatch.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020-2022 by Contributors * @file array/tensordispatch.h * @brief This file defines the dispatcher of tensor operators to @@ -38,7 +38,7 @@ #endif // DGL_USE_CUDA #include "ndarray.h" -/*! +/** * @brief Casts a pointer \c entry to a function pointer with signature of \c * func. */ @@ -47,7 +47,7 @@ namespace dgl { namespace runtime { -/*! 
+/** * @brief Dispatcher that delegates the function calls to framework-specific C++ * APIs. * @@ -55,19 +55,19 @@ namespace runtime { */ class TensorDispatcher { public: - /*! @brief Get the singleton instance. */ + /** @brief Get the singleton instance. */ static TensorDispatcher* Global() { static TensorDispatcher inst; return &inst; } - /*! @brief Whether an adapter library is available. */ + /** @brief Whether an adapter library is available. */ inline bool IsAvailable() { return available_; } - /*! @brief Load symbols from the given tensor adapter library path. */ + /** @brief Load symbols from the given tensor adapter library path. */ bool Load(const char* path_cstr); - /*! + /** * @brief Allocate a piece of CPU memory via PyTorch's CPUAllocator. * Used in CPUDeviceAPI::AllocWorkspace(). * @@ -79,7 +79,7 @@ class TensorDispatcher { return FUNCCAST(tensoradapter::CPURawAlloc, entry)(nbytes); } - /*! + /** * @brief Free the CPU memory. * Used in CPUDeviceAPI::FreeWorkspace(). * @@ -91,7 +91,7 @@ class TensorDispatcher { } #ifdef DGL_USE_CUDA - /*! + /** * @brief Allocate a piece of GPU memory via * PyTorch's THCCachingAllocator. * Used in CUDADeviceAPI::AllocWorkspace(). @@ -109,7 +109,7 @@ class TensorDispatcher { return FUNCCAST(tensoradapter::CUDARawAlloc, entry)(nbytes, stream); } - /*! + /** * @brief Free the GPU memory. * Used in CUDADeviceAPI::FreeWorkspace(). * @@ -120,7 +120,7 @@ class TensorDispatcher { FUNCCAST(tensoradapter::CUDARawDelete, entry)(ptr); } - /*! + /** * @brief Find the current PyTorch CUDA stream * Used in runtime::getCurrentCUDAStream(). * @@ -136,7 +136,7 @@ class TensorDispatcher { } #endif // DGL_USE_CUDA - /*! + /** * @brief Record streams that are using this tensor. * Used in NDArray::RecordStream(). * @@ -153,12 +153,12 @@ class TensorDispatcher { } private: - /*! @brief ctor */ + /** @brief ctor */ TensorDispatcher() = default; - /*! @brief dtor */ + /** @brief dtor */ ~TensorDispatcher(); - /*! 
+ /** * @brief List of symbols in the adapter library. * * Must match the functions in tensoradapter/include/tensoradapter.h. @@ -170,7 +170,7 @@ class TensorDispatcher { #endif // DGL_USE_CUDA }; - /*! @brief Index of each function to the symbol list */ + /** @brief Index of each function to the symbol list */ class Op { public: static constexpr int kCPURawAlloc = 0; @@ -183,10 +183,10 @@ class TensorDispatcher { #endif // DGL_USE_CUDA }; - /*! @brief Number of functions */ + /** @brief Number of functions */ static constexpr int num_entries_ = sizeof(names_) / sizeof(names_[0]); - /*! @brief Entrypoints of each function */ + /** @brief Entrypoints of each function */ void* entrypoints_[num_entries_] = { nullptr, nullptr, #ifdef DGL_USE_CUDA diff --git a/include/dgl/runtime/threading_backend.h b/include/dgl/runtime/threading_backend.h index b6a6541bb7af..59fdc9e5a8bc 100644 --- a/include/dgl/runtime/threading_backend.h +++ b/include/dgl/runtime/threading_backend.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2018 by Contributors * @file dgl/runtime/threading_backend.h * @brief Utilities for manipulating thread pool threads. @@ -14,7 +14,7 @@ namespace dgl { namespace runtime { namespace threading { -/*! +/** * @brief A platform-agnostic abstraction for managing a collection of * thread pool threads. */ @@ -22,7 +22,7 @@ class ThreadGroup { public: class Impl; - /*! + /** * @brief Creates a collection of threads which run a provided function. * * @param num_workers The total number of worker threads in this group. @@ -39,7 +39,7 @@ class ThreadGroup { bool exclude_worker0 = false); ~ThreadGroup(); - /*! + /** * @brief Blocks until all non-main threads in the pool finish. */ void Join(); @@ -49,7 +49,7 @@ class ThreadGroup { kLittle = -1, }; - /*! + /** * @brief configure the CPU id affinity * * @param mode The preferred CPU type (1 = big, -1 = little). @@ -67,14 +67,14 @@ class ThreadGroup { Impl* impl_; }; -/*! +/** * @brief Platform-agnostic no-op. 
*/ // This used to be Yield(), renaming to YieldThread() because windows.h defined // it as a macro in later SDKs. void YieldThread(); -/*! +/** * @return the maximum number of effective workers for this system. */ int MaxConcurrency(); diff --git a/include/dgl/runtime/util.h b/include/dgl/runtime/util.h index 7b6003e852a9..b05af624e2e4 100644 --- a/include/dgl/runtime/util.h +++ b/include/dgl/runtime/util.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2017 by Contributors * @file dgl/runtime/util.h * @brief Useful runtime util. @@ -11,7 +11,7 @@ namespace dgl { namespace runtime { -/*! +/** * @brief Check whether type matches the given spec. * @param t The type * @param code The type code. @@ -28,7 +28,7 @@ inline bool TypeMatch(DGLDataType t, int code, int bits, int lanes = 1) { namespace dgl { namespace ir { namespace intrinsic { -/*! @brief The kind of structure field info used in intrinsic */ +/** @brief The kind of structure field info used in intrinsic */ enum DGLStructFieldKind : int { // array head address kArrAddr, diff --git a/include/dgl/sampler.h b/include/dgl/sampler.h index 5a52c8e277c4..85d4e0adae9b 100644 --- a/include/dgl/sampler.h +++ b/include/dgl/sampler.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2018 by Contributors * @file dgl/sampler.h * @brief DGL sampler header. @@ -20,7 +20,7 @@ class ImmutableGraph; class SamplerOp { public: - /*! + /** * @brief Sample a graph from the seed vertices with neighbor sampling. * The neighbors are sampled with a uniform distribution. * @@ -39,7 +39,7 @@ class SamplerOp { const std::string &edge_type, int num_hops, int expand_factor, const bool add_self_loop, const ValueType *probability); - /*! + /** * @brief Sample a graph from the seed vertices with layer sampling. * The layers are sampled with a uniform distribution. 
* diff --git a/include/dgl/sampling/negative.h b/include/dgl/sampling/negative.h index 89af290493b5..8183a313949a 100644 --- a/include/dgl/sampling/negative.h +++ b/include/dgl/sampling/negative.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file dgl/sampling/negative.h * @brief Negative sampling. @@ -14,7 +14,7 @@ namespace dgl { namespace sampling { -/*! +/** * @brief Given an edge type, uniformly sample source-destination pairs that do * not have an edge in between using rejection sampling. * diff --git a/include/dgl/sampling/neighbor.h b/include/dgl/sampling/neighbor.h index 4ae8b67ccc84..7c17050777a2 100644 --- a/include/dgl/sampling/neighbor.h +++ b/include/dgl/sampling/neighbor.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file dgl/sampling/neighbor.h * @brief Neighborhood-based sampling. @@ -14,7 +14,7 @@ namespace dgl { namespace sampling { -/*! +/** * @brief Sample from the neighbors of the given nodes and return the sampled * edges as a graph. * @@ -47,7 +47,7 @@ HeteroSubgraph SampleNeighbors( const std::vector& probability, const std::vector& exclude_edges, bool replace = true); -/*! +/** * Select the neighbors with k-largest weights on the connecting edges for each * given node. * diff --git a/include/dgl/sampling/randomwalks.h b/include/dgl/sampling/randomwalks.h index daa6197ae508..964c6addb7d2 100644 --- a/include/dgl/sampling/randomwalks.h +++ b/include/dgl/sampling/randomwalks.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file dgl/samplinig/randomwalks.h * @brief Random walk functions. @@ -17,7 +17,7 @@ namespace dgl { namespace sampling { -/*! +/** * @brief Metapath-based random walk. * @param hg The heterograph. * @param seeds A 1D array of seed nodes, with the type the source type of the @@ -37,7 +37,7 @@ std::tuple RandomWalk( const HeteroGraphPtr hg, const IdArray seeds, const TypeArray metapath, const std::vector &prob); -/*! 
+/** * @brief Metapath-based random walk with restart probability. * @param hg The heterograph. * @param seeds A 1D array of seed nodes, with the type the source type of the @@ -58,7 +58,7 @@ std::tuple RandomWalkWithRestart( const HeteroGraphPtr hg, const IdArray seeds, const TypeArray metapath, const std::vector &prob, double restart_prob); -/*! +/** * @brief Metapath-based random walk with stepwise restart probability. Useful * for PinSAGE-like models. * @param hg The heterograph. diff --git a/include/dgl/scheduler.h b/include/dgl/scheduler.h index 35f33acb5cd3..d5840cd8a51d 100644 --- a/include/dgl/scheduler.h +++ b/include/dgl/scheduler.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2018 by Contributors * @file dgl/scheduler.h * @brief Operations on graph index. @@ -16,7 +16,7 @@ typedef dgl::runtime::NDArray IdArray; namespace sched { -/*! +/** * @brief Generate degree bucketing schedule * @tparam IdType Graph's index data type, can be int32_t or int64_t * @param msg_ids The edge id for each message @@ -35,7 +35,7 @@ template std::vector DegreeBucketing( const IdArray& msg_ids, const IdArray& vids, const IdArray& recv_ids); -/*! +/** * @brief Generate degree bucketing schedule for group_apply edge * @tparam IdType Graph's index data type, can be int32_t or int64_t * @param uids One end vertex of edge by which edges are grouped diff --git a/include/dgl/transform.h b/include/dgl/transform.h index ce17620bc702..4e450326d68f 100644 --- a/include/dgl/transform.h +++ b/include/dgl/transform.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file dgl/transform.h * @brief DGL graph transformations @@ -18,7 +18,7 @@ namespace dgl { namespace transform { -/*! +/** * @brief Given a list of graphs, remove the common nodes that do not have * inbound and outbound edges. * @@ -37,7 +37,7 @@ std::pair, std::vector> CompactGraphs( const std::vector &graphs, const std::vector &always_preserve); -/*! 
+/** * @brief Convert a graph into a bipartite-structured graph for message passing. * * Specifically, we create one node type \c ntype_l on the "left" side and @@ -83,7 +83,7 @@ std::tuple, std::vector> ToBlock( HeteroGraphPtr graph, const std::vector &rhs_nodes, bool include_rhs_in_lhs); -/*! +/** * @brief Convert a multigraph to a simple graph. * * @return A triplet of @@ -116,7 +116,7 @@ std::tuple, std::vector> ToBlock( std::tuple, std::vector> ToSimpleGraph(const HeteroGraphPtr graph); -/*! +/** * @brief Remove edges from a graph. * * @param graph The graph. diff --git a/include/dgl/zerocopy_serializer.h b/include/dgl/zerocopy_serializer.h index 3d988b87ef59..0ba962f6267b 100644 --- a/include/dgl/zerocopy_serializer.h +++ b/include/dgl/zerocopy_serializer.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file rpc/shared_mem_serializer.h * @brief headers for serializer. @@ -23,7 +23,7 @@ namespace dgl { -/*! +/** * * StreamWithBuffer is backed up by dmlc::MemoryFixedSizeStream or * dmlc::MemoryStringStream. This class supports serializing and deserializing @@ -62,7 +62,7 @@ class StreamWithBuffer : public dmlc::SeekStream { explicit Buffer(void* data) : data(data) {} }; - /*! + /** * @brief This constructor is for writing scenario or reading from local * machine * @param strm The backup stream to write/load from @@ -74,7 +74,7 @@ class StreamWithBuffer : public dmlc::SeekStream { : strm_(std::move(strm)), buffer_list_(), send_to_remote_(send_to_remote) {} - /*! + /** * @brief This constructor is for reading from remote * @param strm The stream to write/load from zerocopy write/load * @param data_ptr_list list of pointer to reconstruct NDArray @@ -94,7 +94,7 @@ class StreamWithBuffer : public dmlc::SeekStream { } } - /*! 
+ /** * @brief Construct stream backed up by string * @param blob The string to write/load from zerocopy write/load * @param send_to_remote Whether this stream will be deserialized at remote @@ -105,7 +105,7 @@ class StreamWithBuffer : public dmlc::SeekStream { : strm_(new dmlc::MemoryStringStream(blob)), send_to_remote_(send_to_remote) {} - /*! + /** * @brief Construct stream backed up by string * @param p_buffer buffer pointer * @param size buffer size @@ -117,7 +117,7 @@ class StreamWithBuffer : public dmlc::SeekStream { : strm_(new dmlc::MemoryFixedSizeStream(p_buffer, size)), send_to_remote_(send_to_remote) {} - /*! + /** * @brief Construct stream backed up by string, and reconstruct NDArray * from data_ptr_list * @param blob The string to write/load from zerocopy write/load @@ -130,7 +130,7 @@ class StreamWithBuffer : public dmlc::SeekStream { } } - /*! + /** * @brief Construct stream backed up by string, and reconstruct NDArray * from data_ptr_list * @param p_buffer buffer pointer @@ -155,14 +155,14 @@ class StreamWithBuffer : public dmlc::SeekStream { using dmlc::Stream::Read; using dmlc::Stream::Write; - /*! + /** * @brief push NDArray into stream * If send_to_remote=true, the NDArray will be saved to the buffer list * If send_to_remote=false, the NDArray will be saved to the backedup string */ void PushNDArray(const runtime::NDArray& tensor); - /*! + /** * @brief pop NDArray from stream * If send_to_remote=true, the NDArray will be reconstructed from buffer list * If send_to_remote=false, the NDArray will be reconstructed from shared @@ -170,12 +170,12 @@ class StreamWithBuffer : public dmlc::SeekStream { */ dgl::runtime::NDArray PopNDArray(); - /*! + /** * @brief Get whether this stream is for remote usage */ bool send_to_remote() { return send_to_remote_; } - /*! 
+ /** * @brief Get underlying buffer list */ const std::deque& buffer_list() const { return buffer_list_; } diff --git a/include/intel/cpu_support.h b/include/intel/cpu_support.h index dbeca89dab0b..09a12c635d75 100644 --- a/include/intel/cpu_support.h +++ b/include/intel/cpu_support.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file intel/cpu_support.h * @brief Intel CPU support @@ -53,7 +53,7 @@ struct IntelKernel { } }; -/*! +/** * @brief Element-wise addition kernel using Intel AVX512 instructions. * @note it uses AVX512. */ diff --git a/include/intel/meta_utils.h b/include/intel/meta_utils.h index 3d070439e47d..ed5cd19b87a4 100644 --- a/include/intel/meta_utils.h +++ b/include/intel/meta_utils.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file intel/meta_utils.h * @brief Meta programming utils diff --git a/src/api/api_container.cc b/src/api/api_container.cc index 14c30319cce5..fd3280ff2a24 100644 --- a/src/api/api_container.cc +++ b/src/api/api_container.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file api/api_container.cc * @brief Runtime container APIs. (reference: tvm/src/api/api_lang.cc) diff --git a/src/api/api_test.cc b/src/api/api_test.cc index 9f5d34f63d07..41c58df01941 100644 --- a/src/api/api_test.cc +++ b/src/api/api_test.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2022 by Contributors * @file api/api_test.cc * @brief C APIs for testing FFI diff --git a/src/array/arith.h b/src/array/arith.h index 44a5abd696d4..9526c694eb74 100644 --- a/src/array/arith.h +++ b/src/array/arith.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file array/arith.h * @brief Arithmetic functors diff --git a/src/array/array.cc b/src/array/array.cc index b558678ab474..bea9f9f11ff2 100644 --- a/src/array/array.cc +++ b/src/array/array.cc @@ -1,4 +1,4 @@ -/*! 
+/** * Copyright (c) 2019-2021 by Contributors * @file array/array.cc * @brief DGL array utilities implementation @@ -956,7 +956,7 @@ COOToSimple(const COOMatrix& coo) { const COOMatrix &coalesced_adj = coalesced_result.first; const IdArray &count = coalesced_result.second; - /* + /** * eids_shuffled actually already contains the mapping from old edge space to the * new one: * diff --git a/src/array/array_arith.cc b/src/array/array_arith.cc index 88467f84d085..4e9bf1f75908 100644 --- a/src/array/array_arith.cc +++ b/src/array/array_arith.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file array/array_aritch.cc * @brief DGL array arithmetic operations diff --git a/src/array/array_op.h b/src/array/array_op.h index d7f72767ccf2..36d6331109f3 100644 --- a/src/array/array_op.h +++ b/src/array/array_op.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file array/array_op.h * @brief Array operator templates diff --git a/src/array/check.h b/src/array/check.h index 36f331e14363..e95920979c79 100644 --- a/src/array/check.h +++ b/src/array/check.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file array/check.h * @brief DGL check utilities diff --git a/src/array/cpu/array_cumsum.cc b/src/array/cpu/array_cumsum.cc index d56bd1203eae..6c8523c3b58e 100644 --- a/src/array/cpu/array_cumsum.cc +++ b/src/array/cpu/array_cumsum.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file array/cpu/array_cumsum.cc * @brief Array cumsum CPU implementation diff --git a/src/array/cpu/array_index_select.cc b/src/array/cpu/array_index_select.cc index 8da75a1c0340..c19cecd9b260 100644 --- a/src/array/cpu/array_index_select.cc +++ b/src/array/cpu/array_index_select.cc @@ -1,4 +1,4 @@ -/*! 
+/** * Copyright (c) 2019 by Contributors * @file array/cpu/array_index_select.cc * @brief Array index select CPU implementation diff --git a/src/array/cpu/array_nonzero.cc b/src/array/cpu/array_nonzero.cc index 66fc32739e99..683dcd03db73 100644 --- a/src/array/cpu/array_nonzero.cc +++ b/src/array/cpu/array_nonzero.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file array/cpu/array_nonzero.cc * @brief Array nonzero CPU implementation diff --git a/src/array/cpu/array_op_impl.cc b/src/array/cpu/array_op_impl.cc index f2ba23bdaf40..699d2a2f26f2 100644 --- a/src/array/cpu/array_op_impl.cc +++ b/src/array/cpu/array_op_impl.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file array/cpu/array_op_impl.cc * @brief Array operator CPU implementation diff --git a/src/array/cpu/array_pack.cc b/src/array/cpu/array_pack.cc index 2bf2c20f8d1b..aa571da2ec1f 100644 --- a/src/array/cpu/array_pack.cc +++ b/src/array/cpu/array_pack.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file array/cpu/array_index_select.cc * @brief Array index select CPU implementation diff --git a/src/array/cpu/array_repeat.cc b/src/array/cpu/array_repeat.cc index 2b3c525f9d7d..0b2098a68914 100644 --- a/src/array/cpu/array_repeat.cc +++ b/src/array/cpu/array_repeat.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file array/cpu/array_repeat.cc * @brief Array repeat CPU implementation diff --git a/src/array/cpu/array_scatter.cc b/src/array/cpu/array_scatter.cc index 8b837c37c726..8ab54156e16b 100644 --- a/src/array/cpu/array_scatter.cc +++ b/src/array/cpu/array_scatter.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file array/cpu/array_scatter.cc * @brief Array scatter CPU implementation diff --git a/src/array/cpu/array_sort.cc b/src/array/cpu/array_sort.cc index aff707aed29d..7608ebd7885c 100644 --- a/src/array/cpu/array_sort.cc +++ b/src/array/cpu/array_sort.cc @@ -1,4 +1,4 @@ -/*! 
+/** * Copyright (c) 2020 by Contributors * @file array/cpu/array_sort.cc * @brief Array sort CPU implementation diff --git a/src/array/cpu/array_utils.h b/src/array/cpu/array_utils.h index 74cb257794b6..5a46be613770 100644 --- a/src/array/cpu/array_utils.h +++ b/src/array/cpu/array_utils.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file dgl/array_utils.h * @brief Utility classes and functions for DGL arrays. @@ -18,7 +18,7 @@ namespace dgl { namespace aten { -/*! +/** * @brief A hashmap that maps each ids in the given array to new ids starting * from zero. * @@ -109,7 +109,7 @@ class IdHashMap { phmap::flat_hash_map oldv2newv_; }; -/* +/** * @brief Hash type for building maps/sets with pairs as keys. */ struct PairHash { diff --git a/src/array/cpu/coo_coalesce.cc b/src/array/cpu/coo_coalesce.cc index aa02f53aad8c..b9e677ff6dee 100644 --- a/src/array/cpu/coo_coalesce.cc +++ b/src/array/cpu/coo_coalesce.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file array/cpu/coo_coalesce.cc * @brief COO coalescing diff --git a/src/array/cpu/coo_linegraph.cc b/src/array/cpu/coo_linegraph.cc index 891307b800bd..080533e1da65 100644 --- a/src/array/cpu/coo_linegraph.cc +++ b/src/array/cpu/coo_linegraph.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file array/cpu/coo_line_graph.cc * @brief COO LineGraph diff --git a/src/array/cpu/coo_remove.cc b/src/array/cpu/coo_remove.cc index cf70cc6442fa..a7caeaa9a267 100644 --- a/src/array/cpu/coo_remove.cc +++ b/src/array/cpu/coo_remove.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file array/cpu/coo_remove.cc * @brief COO matrix remove entries CPU implementation @@ -17,7 +17,7 @@ namespace impl { namespace { -/*! 
@brief COORemove implementation for COOMatrix with default consecutive edge +/** @brief COORemove implementation for COOMatrix with default consecutive edge * IDs */ template void COORemoveConsecutive( @@ -45,7 +45,7 @@ void COORemoveConsecutive( } } -/*! @brief COORemove implementation for COOMatrix with shuffled edge IDs */ +/** @brief COORemove implementation for COOMatrix with shuffled edge IDs */ template void COORemoveShuffled( COOMatrix coo, IdArray entries, std::vector *new_rows, diff --git a/src/array/cpu/coo_sort.cc b/src/array/cpu/coo_sort.cc index b089e5dbaddb..20668a5f003a 100644 --- a/src/array/cpu/coo_sort.cc +++ b/src/array/cpu/coo_sort.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file array/cpu/coo_sort.cc * @brief COO sorting diff --git a/src/array/cpu/csr_get_data.cc b/src/array/cpu/csr_get_data.cc index 4989ef3b9e5c..75fa4233b610 100644 --- a/src/array/cpu/csr_get_data.cc +++ b/src/array/cpu/csr_get_data.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2021 by Contributors * @file array/cpu/csr_get_data.cc * @brief Retrieve entries of a CSR matrix diff --git a/src/array/cpu/csr_mm.cc b/src/array/cpu/csr_mm.cc index eaf0ec986314..9833381a52a1 100644 --- a/src/array/cpu/csr_mm.cc +++ b/src/array/cpu/csr_mm.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file array/cpu/csr_mm.cc * @brief CSR Matrix Multiplication diff --git a/src/array/cpu/csr_remove.cc b/src/array/cpu/csr_remove.cc index 4a3d72676255..1ed188229458 100644 --- a/src/array/cpu/csr_remove.cc +++ b/src/array/cpu/csr_remove.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file array/cpu/coo_remove.cc * @brief CSR matrix remove entries CPU implementation diff --git a/src/array/cpu/csr_sort.cc b/src/array/cpu/csr_sort.cc index 2e3ef95dd527..5ff1e93bd1a3 100644 --- a/src/array/cpu/csr_sort.cc +++ b/src/array/cpu/csr_sort.cc @@ -1,4 +1,4 @@ -/*! 
+/** * Copyright (c) 2020 by Contributors * @file array/cpu/csr_sort.cc * @brief CSR sorting diff --git a/src/array/cpu/csr_sum.cc b/src/array/cpu/csr_sum.cc index c95a8c72bb45..4a8a9bb3d3c4 100644 --- a/src/array/cpu/csr_sum.cc +++ b/src/array/cpu/csr_sum.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file array/cpu/csr_sum.cc * @brief CSR Summation diff --git a/src/array/cpu/csr_to_simple.cc b/src/array/cpu/csr_to_simple.cc index bd256266ba3e..9a7c57f09621 100644 --- a/src/array/cpu/csr_to_simple.cc +++ b/src/array/cpu/csr_to_simple.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file array/cpu/csr_to_simple.cc * @brief CSR sorting diff --git a/src/array/cpu/csr_union.cc b/src/array/cpu/csr_union.cc index e9cfb7279628..d694128c2218 100644 --- a/src/array/cpu/csr_union.cc +++ b/src/array/cpu/csr_union.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file array/cpu/coo_sort.cc * @brief COO sorting diff --git a/src/array/cpu/gather_mm.cc b/src/array/cpu/gather_mm.cc index ec1d0be90459..a2fdf895a26c 100644 --- a/src/array/cpu/gather_mm.cc +++ b/src/array/cpu/gather_mm.cc @@ -1,4 +1,4 @@ - /*! + /** * Copyright (c) 2020 by Contributors * @file kernel/cpu/gaher_mm.cc * @brief GatherMM C APIs and definitions. @@ -9,7 +9,7 @@ namespace dgl { namespace aten { -/*! @brief Generalized SegmentMM. */ +/** @brief Generalized SegmentMM. */ template void SegmentMM(const NDArray A, const NDArray B, @@ -27,7 +27,7 @@ void SegmentMMBackwardB(const NDArray A, LOG(FATAL) << "Unsupported CPU kernel for SegmentMMBackwardB."; } -/*! @brief Generalized GatherMM. */ +/** @brief Generalized GatherMM. */ template void GatherMM(const NDArray A, const NDArray B, @@ -37,7 +37,7 @@ void GatherMM(const NDArray A, LOG(FATAL) << "Unsupported CPU kernel for GatherMM."; } -/*! @brief Generalized GatherMM_scatter. */ +/** @brief Generalized GatherMM_scatter. 
*/ template void GatherMMScatter(const NDArray A, const NDArray B, diff --git a/src/array/cpu/gather_mm.h b/src/array/cpu/gather_mm.h index 0844296c8440..69ecb91f8495 100644 --- a/src/array/cpu/gather_mm.h +++ b/src/array/cpu/gather_mm.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2022 by Contributors * @file array/cpu/gather_mm.h * @brief GATHER_MM CPU kernel function header. @@ -45,7 +45,7 @@ void matmul( } } -/*! +/** * @brief CPU kernel of Gather_mm. The input matrix A is expected to be * sorted according to relation type. * @param A The input dense matrix of dimension m x k diff --git a/src/array/cpu/negative_sampling.cc b/src/array/cpu/negative_sampling.cc index 51c11515f00e..53dbf0904453 100644 --- a/src/array/cpu/negative_sampling.cc +++ b/src/array/cpu/negative_sampling.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2021 by Contributors * @file array/cpu/negative_sampling.cc * @brief Uniform negative sampling on CSR. diff --git a/src/array/cpu/rowwise_pick.h b/src/array/cpu/rowwise_pick.h index 71124fafabbf..9252a5b8f705 100644 --- a/src/array/cpu/rowwise_pick.h +++ b/src/array/cpu/rowwise_pick.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file array/cpu/rowwise_pick.h * @brief Template implementation for rowwise pick operators. diff --git a/src/array/cpu/rowwise_sampling.cc b/src/array/cpu/rowwise_sampling.cc index df95fe21e114..b91184064234 100644 --- a/src/array/cpu/rowwise_sampling.cc +++ b/src/array/cpu/rowwise_sampling.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file array/cpu/rowwise_sampling.cc * @brief rowwise sampling diff --git a/src/array/cpu/rowwise_topk.cc b/src/array/cpu/rowwise_topk.cc index 8db7b571a824..4ff72912f42e 100644 --- a/src/array/cpu/rowwise_topk.cc +++ b/src/array/cpu/rowwise_topk.cc @@ -1,4 +1,4 @@ -/*! 
+/** * Copyright (c) 2020 by Contributors * @file array/cpu/rowwise_topk.cc * @brief rowwise topk diff --git a/src/array/cpu/sddmm.cc b/src/array/cpu/sddmm.cc index 4b4ae8e49223..3cfc7fdf1b7d 100644 --- a/src/array/cpu/sddmm.cc +++ b/src/array/cpu/sddmm.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file aten/cpu/sddmm.cc * @brief SDDMM C APIs and definitions. @@ -42,7 +42,7 @@ namespace aten { } while (0) -/*! @brief Generalized SDDMM on Csr format. */ +/** @brief Generalized SDDMM on Csr format. */ template void SDDMMCsr(const std::string& op, const BcastOff& bcast, @@ -59,7 +59,7 @@ void SDDMMCsr(const std::string& op, }); } -/*! @brief Generalized SDDMM on Csr format with Heterograph support. */ +/** @brief Generalized SDDMM on Csr format with Heterograph support. */ template void SDDMMCsrHetero(const std::string& op, const BcastOff& bcast, @@ -131,7 +131,7 @@ template void SDDMMCsrHetero( const std::vector& in_eid, const std::vector& out_eid); -/*! @brief Generalized SDDMM on Coo format. */ +/** @brief Generalized SDDMM on Coo format. */ template void SDDMMCoo(const std::string& op, const BcastOff& bcast, @@ -148,7 +148,7 @@ void SDDMMCoo(const std::string& op, }); } -/*! @brief Generalized SDDMM on Coo format with Heterograph support. */ +/** @brief Generalized SDDMM on Coo format with Heterograph support. */ template void SDDMMCooHetero(const std::string& op, const BcastOff& bcast, diff --git a/src/array/cpu/sddmm.h b/src/array/cpu/sddmm.h index bfc444499891..9e372bfc3ac2 100644 --- a/src/array/cpu/sddmm.h +++ b/src/array/cpu/sddmm.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file array/cpu/sddmm.h * @brief SDDMM CPU kernel function header. @@ -16,7 +16,7 @@ namespace dgl { namespace aten { namespace cpu { -/*! +/** * @brief CPU kernel of g-SDDMM on Csr format. * @param bcast Broadcast information. * @param csr The Csr matrix. @@ -68,7 +68,7 @@ void SDDMMCsr( }); } -/*! 
+/** * @brief CPU kernel of g-SDDMM on Coo format. * @param bcast Broadcast information. * @param coo The COO matrix. diff --git a/src/array/cpu/segment_reduce.cc b/src/array/cpu/segment_reduce.cc index b48992bd7827..4b02f253a6f0 100644 --- a/src/array/cpu/segment_reduce.cc +++ b/src/array/cpu/segment_reduce.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file kernel/cpu/segment_reduce.cc * @brief Segment reduce C APIs and definitions. @@ -11,7 +11,7 @@ namespace dgl { namespace aten { -/*! @brief Segment Reduce operator. */ +/** @brief Segment Reduce operator. */ template void SegmentReduce( const std::string& op, @@ -34,7 +34,7 @@ void SegmentReduce( } } -/*! @brief Scatter Add.*/ +/** @brief Scatter Add.*/ template void ScatterAdd(NDArray feat, NDArray idx, @@ -42,7 +42,7 @@ void ScatterAdd(NDArray feat, cpu::ScatterAdd(feat, idx, out); } -/*! @brief Update gradients for reduce operator max/min on heterogeneous graph.*/ +/** @brief Update gradients for reduce operator max/min on heterogeneous graph.*/ template void UpdateGradMinMax_hetero(const HeteroGraphPtr& g, const std::string& op, @@ -53,7 +53,7 @@ void UpdateGradMinMax_hetero(const HeteroGraphPtr& g, cpu::UpdateGradMinMax_hetero(g, op, feat, idx, idx_etype, out); } -/*! @brief Backward function of segment cmp.*/ +/** @brief Backward function of segment cmp.*/ template void BackwardSegmentCmp( NDArray feat, diff --git a/src/array/cpu/segment_reduce.h b/src/array/cpu/segment_reduce.h index 76c6077fd5a0..fa7ec74f86dc 100644 --- a/src/array/cpu/segment_reduce.h +++ b/src/array/cpu/segment_reduce.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file array/cpu/spmm.h * @brief Segment reduce kernel function header. @@ -17,7 +17,7 @@ namespace dgl { namespace aten { namespace cpu { -/*! +/** * @brief CPU kernel of segment sum. * @param feat The input tensor. * @param offsets The offset tensor storing the ranges of segments. 
@@ -42,7 +42,7 @@ void SegmentSum(NDArray feat, NDArray offsets, NDArray out) { }); } -/*! +/** * @brief CPU kernel of segment min/max. * @param feat The input tensor. * @param offsets The offset tensor storing the ranges of segments. @@ -76,7 +76,7 @@ void SegmentCmp(NDArray feat, NDArray offsets, NDArray out, NDArray arg) { }); } -/*! +/** * @brief CPU kernel of Scatter Add (on first dimension) operator. * @note math equation: out[idx[i], *] += feat[i, *] * @param feat The input tensor. @@ -101,7 +101,7 @@ void ScatterAdd(NDArray feat, NDArray idx, NDArray out) { } } -/*! +/** * @brief CPU kernel to update gradients for reduce op max/min * @param graph The input heterogeneous graph. * @param op The binary operator, could be `copy_u`, `copy_e'. @@ -159,7 +159,7 @@ void UpdateGradMinMax_hetero( } } -/*! +/** * @brief CPU kernel of backward phase of segment min/max. * @note math equation: out[arg[i, k], k] = feat[i, k] * @param feat The input tensor. diff --git a/src/array/cpu/spmat_op_impl_coo.cc b/src/array/cpu/spmat_op_impl_coo.cc index 38b8a9b7ca82..5c3ffcb26396 100644 --- a/src/array/cpu/spmat_op_impl_coo.cc +++ b/src/array/cpu/spmat_op_impl_coo.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file array/cpu/spmat_op_impl.cc * @brief CPU implementation of COO sparse matrix operators @@ -22,7 +22,7 @@ using runtime::parallel_for; namespace aten { namespace impl { -/* +/** * TODO(BarclayII): * For row-major sorted COOs, we have faster implementation with binary search, * sorted search, etc. Later we should benchmark how much we can gain with @@ -624,7 +624,7 @@ CSRMatrix UnSortedDenseCOOToCSR(const COOMatrix &coo) { } // namespace -/* +/** Implementation and Complexity details. N: num_nodes, NNZ: num_edges, P: num_threads. 1. If row is sorted in COO, SortedCOOToCSR<> is applied. Time: O(NNZ/P). 
diff --git a/src/array/cpu/spmat_op_impl_csr.cc b/src/array/cpu/spmat_op_impl_csr.cc index f95faab2c9df..0b8d063945e3 100644 --- a/src/array/cpu/spmat_op_impl_csr.cc +++ b/src/array/cpu/spmat_op_impl_csr.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file array/cpu/spmat_op_impl_csr.cc * @brief CSR matrix operator CPU implementation diff --git a/src/array/cpu/spmm.cc b/src/array/cpu/spmm.cc index d1d567417377..2e2c3f6ce498 100644 --- a/src/array/cpu/spmm.cc +++ b/src/array/cpu/spmm.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file kernel/cpu/spmm.cc * @brief SPMM C APIs and definitions. @@ -9,7 +9,7 @@ namespace dgl { namespace aten { -/*! @brief Generalized SpMM on Csr format. */ +/** @brief Generalized SpMM on Csr format. */ template void SpMMCsr(const std::string& op, const std::string& reduce, const BcastOff& bcast, @@ -41,7 +41,7 @@ void SpMMCsr(const std::string& op, const std::string& reduce, } } -/*! @brief Generalized SpMM on Csr format. */ +/** @brief Generalized SpMM on Csr format. */ template void SpMMCsrHetero(const std::string& op, const std::string& reduce, const BcastOff& bcast, @@ -159,7 +159,7 @@ template void SpMMCsrHetero( const std::vector& ufeat_node_tids, const std::vector& out_node_tids); -/*! @brief Edge_softmax_csr forward op on Csr format. */ +/** @brief Edge_softmax_csr forward op on Csr format. */ template void Edge_softmax_csr_forward(const std::string& op, const BcastOff& bcast, @@ -172,7 +172,7 @@ void Edge_softmax_csr_forward(const std::string& op, }); } -/*! @brief Edge_softmax_csr backward op on Csr format. */ +/** @brief Edge_softmax_csr backward op on Csr format. */ template void Edge_softmax_csr_backward(const std::string& op, const BcastOff& bcast, @@ -219,7 +219,7 @@ template void Edge_softmax_csr_backward( const BcastOff& bcast, const CSRMatrix& csr, NDArray ufeat, NDArray efeat, NDArray out); -/*! @brief Generalized SpMM on Coo format. 
*/ +/** @brief Generalized SpMM on Coo format. */ template void SpMMCoo(const std::string& op, const std::string& reduce, const BcastOff& bcast, diff --git a/src/array/cpu/spmm.h b/src/array/cpu/spmm.h index f4945c1c6b20..045309502552 100644 --- a/src/array/cpu/spmm.h +++ b/src/array/cpu/spmm.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file array/cpu/spmm.h * @brief SPMM CPU kernel function header. @@ -32,7 +32,7 @@ namespace cpu { #if !defined(_WIN32) #ifdef USE_AVX -/*! +/** * @brief CPU kernel of SpMM on Csr format using Xbyak. * @param cpu_spec JIT'ed kernel * @param bcast Broadcast information. @@ -69,7 +69,7 @@ void SpMMSumCsrXbyak( #endif // USE_AVX #endif // _WIN32 -/*! +/** * @brief Naive CPU kernel of SpMM on Csr format. * @param cpu_spec JIT'ed kernel * @param bcast Broadcast information. @@ -110,7 +110,7 @@ void SpMMSumCsrNaive( }); } -/*! +/** * @brief CPU kernel of SpMM on Csr format. * @param bcast Broadcast information. * @param csr The Csr matrix. @@ -176,7 +176,7 @@ void SpMMSumCsr( #endif // _WIN32 } -/*! +/** * @brief CPU kernel of SpMM on Coo format. * @param bcast Broadcast information. * @param coo The Coo matrix. @@ -225,7 +225,7 @@ void SpMMSumCoo( } } -/*! +/** * @brief CPU kernel of SpMM-Min/Max on Csr format. * @param bcast Broadcast information. * @param csr The Csr matrix. @@ -322,7 +322,7 @@ void SpMMCmpCsr( #endif // _WIN32 } -/*! +/** * @brief CPU kernel of SpMM-Min/Max on Csr format. * @param bcast Broadcast information. * @param csr The Csr matrix. @@ -418,7 +418,7 @@ void SpMMCmpCsrHetero( }); } -/*! +/** * @brief CPU kernel of SpMM-Min/Max on Coo format. * @param bcast Broadcast information. * @param coo The Coo matrix. @@ -484,7 +484,7 @@ void SpMMCmpCoo( } } -/*! +/** * @brief CPU kernel of Edge_softmax_csr_forward on Csr format. * @param bcast Broadcast information. * @param csr The Csr matrix. @@ -532,7 +532,7 @@ void Edge_softmax_csr_forward( }); } -/*! 
+/** * @brief CPU kernel of Edge_softmax_csr_backward on Csr format. * @param bcast Broadcast information. * @param csr The Csr matrix. diff --git a/src/array/cpu/spmm_binary_ops.h b/src/array/cpu/spmm_binary_ops.h index 5d55c9b421f6..d4210ed75581 100644 --- a/src/array/cpu/spmm_binary_ops.h +++ b/src/array/cpu/spmm_binary_ops.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file array/cpu/spmm_binary_ops.h * @brief SPMM CPU Binary ops. diff --git a/src/array/cpu/spmm_blocking_libxsmm.h b/src/array/cpu/spmm_blocking_libxsmm.h index da55e8ab561b..00e8d26ee9ea 100644 --- a/src/array/cpu/spmm_blocking_libxsmm.h +++ b/src/array/cpu/spmm_blocking_libxsmm.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2021 Intel Corporation * @file array/cpu/spmm.h * @brief SPMM CPU kernel function header. @@ -48,7 +48,7 @@ int32_t GetLLCSize() { return cache_size; } -/*! +/** * @brief Tile the CSR matrix to roughly make sure that the column tiles and * corresponding neighbor features fit into LLC and the row tiles * are assigned to OMP threads. @@ -165,7 +165,7 @@ inline void SpMMCreateBlocks( } } -/*! +/** * @brief Create libxsmm kernel. * @param has_idx For the edge features, are there indices available. * @param N Feature size. @@ -269,7 +269,7 @@ inline libxsmm_meltwfunction_opreduce_vecs_idx SpMMCreateLibxsmmKernel( return kernel; } -/*! +/** * @brief Use libxsmm to perform SpMM-Sum on all blocks. * @param block_csr_array The array containing csr matrices of all blocks. * @param B The feature on source nodes. @@ -326,7 +326,7 @@ inline void SpMMBlockwiseOpSum( } } -/*! +/** * @brief Use libxsmm to perform SpMM-Max/Min on all blocks. * @param block_csr_array The array containing csr matrices of all blocks. * @param B The feature on source nodes. @@ -390,7 +390,7 @@ inline void SpMMBlockwiseOpCmp( } } -/*! +/** * @brief Free the tiled CSR matrix data. * @param block_csr_array The array containing csr matrices of all blocks. 
* @param num_M_blocks Number of blocks to create along the rows of adjacency @@ -412,7 +412,7 @@ inline void SpMMFreeBlocks( free(block_csr_array); } -/*! +/** * @brief Optimized CPU kernel of SpMM-Sum/Max/Min on Csr format. * @param bcast Broadcast information. * @param csr The Csr matrix. @@ -550,7 +550,7 @@ void SpMMRedopCsrOpt( #endif // DEBUG } -/*! +/** * @brief Optimized CPU kernel of SpMM-Sum on Csr format. * @param bcast Broadcast information. * @param csr The Csr matrix. @@ -568,7 +568,7 @@ void SpMMSumCsrLibxsmm( bcast, csr, ufeat, efeat, out, dummy, dummy); } -/*! +/** * @brief Optimized CPU kernel of SpMM-Min/Max on Csr format. * @param bcast Broadcast information. * @param csr The Csr matrix. diff --git a/src/array/cpu/traversal.cc b/src/array/cpu/traversal.cc index bf8da6f62856..4f567e26ef06 100644 --- a/src/array/cpu/traversal.cc +++ b/src/array/cpu/traversal.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file array/cpu/traversal.cc * @brief Graph traversal implementation diff --git a/src/array/cpu/traversal.h b/src/array/cpu/traversal.h index dd90d0a8a6fc..b593332746c1 100644 --- a/src/array/cpu/traversal.h +++ b/src/array/cpu/traversal.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file array/cpu/traversal.h * @brief Graph traversal routines. @@ -21,7 +21,7 @@ namespace dgl { namespace aten { namespace impl { -/*! +/** * @brief Traverse the graph in a breadth-first-search (BFS) order. * * The queue object must suffice following interface: @@ -85,7 +85,7 @@ void BFSTraverseNodes( } } -/*! +/** * @brief Traverse the graph in a breadth-first-search (BFS) order, returning * the edges of the BFS tree. * @@ -153,7 +153,7 @@ void BFSTraverseEdges( } } -/*! +/** * @brief Traverse the graph in topological order. * * The queue object must suffice following interface: @@ -226,13 +226,13 @@ void TopologicalNodes( } } -/*!\brief Tags for ``DFSEdges``. */ +/** @brief Tags for ``DFSEdges``. 
*/ enum DFSEdgeTag { kForward = 0, kReverse, kNonTree, }; -/*! +/** * @brief Traverse the graph in a depth-first-search (DFS) order. * * The traversal visit edges in its DFS order. Edges have three tags: diff --git a/src/array/cuda/array_cumsum.cu b/src/array/cuda/array_cumsum.cu index 086cc92c2cdb..297c6a44fd12 100644 --- a/src/array/cuda/array_cumsum.cu +++ b/src/array/cuda/array_cumsum.cu @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file array/cpu/array_cumsum.cu * @brief Array cumsum GPU implementation diff --git a/src/array/cuda/array_index_select.cu b/src/array/cuda/array_index_select.cu index 64588d6e67b4..7fd8d7af6e88 100644 --- a/src/array/cuda/array_index_select.cu +++ b/src/array/cuda/array_index_select.cu @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file array/cpu/array_index_select.cu * @brief Array index select GPU implementation diff --git a/src/array/cuda/array_index_select.cuh b/src/array/cuda/array_index_select.cuh index aaf6b892907a..d5c16e203e2d 100644 --- a/src/array/cuda/array_index_select.cuh +++ b/src/array/cuda/array_index_select.cuh @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2021-2022 by Contributors * @file array/cuda/array_index_select.cuh * @brief Array index select GPU kernel implementation diff --git a/src/array/cuda/array_nonzero.cu b/src/array/cuda/array_nonzero.cu index ad2cf81fdfc0..9780ea0efec9 100644 --- a/src/array/cuda/array_nonzero.cu +++ b/src/array/cuda/array_nonzero.cu @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file array/cpu/array_nonzero.cc * @brief Array nonzero CPU implementation diff --git a/src/array/cuda/array_op_impl.cu b/src/array/cuda/array_op_impl.cu index e3b485fa2c22..a98f8afba28f 100644 --- a/src/array/cuda/array_op_impl.cu +++ b/src/array/cuda/array_op_impl.cu @@ -1,4 +1,4 @@ -/*! 
+/** * Copyright (c) 2020-2021 by Contributors * @file array/cuda/array_op_impl.cu * @brief Array operator GPU implementation diff --git a/src/array/cuda/array_scatter.cu b/src/array/cuda/array_scatter.cu index f5077dfb2a4a..d6a6a542db7a 100644 --- a/src/array/cuda/array_scatter.cu +++ b/src/array/cuda/array_scatter.cu @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file array/cuda/array_scatter.cu * @brief Array scatter GPU implementation diff --git a/src/array/cuda/array_sort.cu b/src/array/cuda/array_sort.cu index 1220f4869668..dded166b961d 100644 --- a/src/array/cuda/array_sort.cu +++ b/src/array/cuda/array_sort.cu @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file array/cpu/array_sort.cu * @brief Array sort GPU implementation diff --git a/src/array/cuda/atomic.cuh b/src/array/cuda/atomic.cuh index bd085857abed..8afd37f81a62 100644 --- a/src/array/cuda/atomic.cuh +++ b/src/array/cuda/atomic.cuh @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file array/cuda/atomic.cuh * @brief Atomic functions diff --git a/src/array/cuda/coo2csr.cu b/src/array/cuda/coo2csr.cu index 6e8b0e55c4c3..1105adef6c82 100644 --- a/src/array/cuda/coo2csr.cu +++ b/src/array/cuda/coo2csr.cu @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file array/cuda/coo2csr.cc * @brief COO2CSR @@ -58,7 +58,7 @@ CSRMatrix COOToCSR(COOMatrix coo) { coo.num_rows, coo.num_cols, indptr, coo.col, coo.data, col_sorted); } -/*! +/** * @brief Search for the insertion positions for needle in the hay. * * The hay is a list of sorted elements and the result is the insertion position diff --git a/src/array/cuda/coo_sort.cu b/src/array/cuda/coo_sort.cu index 31443c3cb3b1..717568e366ea 100644 --- a/src/array/cuda/coo_sort.cu +++ b/src/array/cuda/coo_sort.cu @@ -1,4 +1,4 @@ -/*! 
+/** * Copyright (c) 2020 by Contributors * @file array/cuda/coo_sort.cc * @brief Sort COO index diff --git a/src/array/cuda/csr2coo.cu b/src/array/cuda/csr2coo.cu index 44743b6ee1bb..db3b46b9ae4f 100644 --- a/src/array/cuda/csr2coo.cu +++ b/src/array/cuda/csr2coo.cu @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file array/cuda/csr2coo.cc * @brief CSR2COO @@ -45,7 +45,7 @@ COOMatrix CSRToCOO(CSRMatrix csr) { csr.num_rows, csr.num_cols, row, indices, data, true, csr.sorted); } -/*! +/** * @brief Repeat elements * @param val Value to repeat * @param repeats Number of repeats for each value diff --git a/src/array/cuda/csr_get_data.cu b/src/array/cuda/csr_get_data.cu index 534fda9fb9ec..422bf1bd157a 100644 --- a/src/array/cuda/csr_get_data.cu +++ b/src/array/cuda/csr_get_data.cu @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2021 by Contributors * @file array/cuda/csr_get_data.cu * @brief Retrieve entries of a CSR matrix diff --git a/src/array/cuda/csr_mm.cu b/src/array/cuda/csr_mm.cu index 5c0052655ec4..7640b8930f61 100644 --- a/src/array/cuda/csr_mm.cu +++ b/src/array/cuda/csr_mm.cu @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file array/cuda/csr_mm.cu * @brief SpSpMM/SpGEMM C APIs and definitions. @@ -18,7 +18,7 @@ namespace cusparse { #if 0 // disabling CUDA 11.0+ implementation for now because of problems on bigger graphs -/*! @brief Cusparse implementation of SpGEMM on Csr format for CUDA 11.0+ */ +/** @brief Cusparse implementation of SpGEMM on Csr format for CUDA 11.0+ */ template std::pair CusparseSpgemm( const CSRMatrix& A, @@ -127,7 +127,7 @@ std::pair CusparseSpgemm( #else // __CUDACC_VER_MAJOR__ != 11 -/*! 
@brief Cusparse implementation of SpGEMM on Csr format for older CUDA versions */ +/** @brief Cusparse implementation of SpGEMM on Csr format for older CUDA versions */ template std::pair CusparseSpgemm( const CSRMatrix& A, diff --git a/src/array/cuda/csr_sort.cu b/src/array/cuda/csr_sort.cu index add56a42a0a1..01c904a4dd5d 100644 --- a/src/array/cuda/csr_sort.cu +++ b/src/array/cuda/csr_sort.cu @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file array/cuda/csr_sort.cc * @brief Sort CSR index @@ -16,7 +16,7 @@ using runtime::NDArray; namespace aten { namespace impl { -/*! +/** * @brief Check whether each row is sorted. */ template diff --git a/src/array/cuda/csr_sum.cu b/src/array/cuda/csr_sum.cu index 88843dfea540..47568edc01c8 100644 --- a/src/array/cuda/csr_sum.cu +++ b/src/array/cuda/csr_sum.cu @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file array/cuda/spmm.cu * @brief SpGEAM C APIs and definitions. @@ -16,7 +16,7 @@ using namespace dgl::runtime; namespace aten { namespace cusparse { -/*! Cusparse implementation of SpSum on Csr format. */ +/** Cusparse implementation of SpSum on Csr format. */ template std::pair CusparseCsrgeam2( const CSRMatrix& A, diff --git a/src/array/cuda/csr_transpose.cc b/src/array/cuda/csr_transpose.cc index fe8714afd815..063e556f601a 100644 --- a/src/array/cuda/csr_transpose.cc +++ b/src/array/cuda/csr_transpose.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file array/cuda/csr_transpose.cc * @brief CSR transpose (convert to CSC) diff --git a/src/array/cuda/cuda_filter.cu b/src/array/cuda/cuda_filter.cu index 81a0ba976dcf..5afaba1b2b16 100644 --- a/src/array/cuda/cuda_filter.cu +++ b/src/array/cuda/cuda_filter.cu @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2021 by Contributors * @file array/cuda/cuda_filter.cc * @brief Object for selecting items in a set, or selecting items not in a set. 
diff --git a/src/array/cuda/cusparse_dispatcher.cuh b/src/array/cuda/cusparse_dispatcher.cuh index da016c1327da..db4f23fd6f91 100644 --- a/src/array/cuda/cusparse_dispatcher.cuh +++ b/src/array/cuda/cusparse_dispatcher.cuh @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file array/cuda/dispatcher.cuh * @brief Templates to dispatch into different cuSPARSE routines based on the type @@ -15,7 +15,7 @@ namespace dgl { namespace aten { -/*! @brief cusparseXcsrgemm dispatcher */ +/** @brief cusparseXcsrgemm dispatcher */ template struct CSRGEMM { template @@ -122,7 +122,7 @@ struct CSRGEMM { } }; -/*! @brief cusparseXcsrgeam dispatcher */ +/** @brief cusparseXcsrgeam dispatcher */ template struct CSRGEAM { template diff --git a/src/array/cuda/dgl_cub.cuh b/src/array/cuda/dgl_cub.cuh index d8fcb5e91ca3..240f68d4a373 100644 --- a/src/array/cuda/dgl_cub.cuh +++ b/src/array/cuda/dgl_cub.cuh @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2021 by Contributors * @file cuda_common.h * @brief Wrapper to place cub in dgl namespace. diff --git a/src/array/cuda/fp16.cuh b/src/array/cuda/fp16.cuh index 18e61e8f6866..c0648d1f5df4 100644 --- a/src/array/cuda/fp16.cuh +++ b/src/array/cuda/fp16.cuh @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020-2022 by Contributors * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/array/cuda/functor.cuh b/src/array/cuda/functor.cuh index 6e06af091a60..7cb9927c4b6c 100644 --- a/src/array/cuda/functor.cuh +++ b/src/array/cuda/functor.cuh @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file array/cuda/functor.cuh * @brief Functors for template on CUDA diff --git a/src/array/cuda/gather_mm.cu b/src/array/cuda/gather_mm.cu index 62b604dcd963..88f17a797416 100644 --- a/src/array/cuda/gather_mm.cu +++ b/src/array/cuda/gather_mm.cu @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file array/cuda/gather_mm.cu * @brief GatherMM C APIs and definitions. 
@@ -15,7 +15,7 @@ namespace aten { namespace { -/*! @brief Call cuBLAS GEMM API for dense matmul operation for float and double. */ +/** @brief Call cuBLAS GEMM API for dense matmul operation for float and double. */ template cublasStatus_t cublasGemm(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, int k, @@ -198,7 +198,7 @@ __global__ void GatherMMScatterKernel2( } // namespace cuda -/*! +/** * @brief Implementation of Gather_mm operator. The input matrix A is * expected to be sorted according to relation type. * @param A The input dense matrix of dimension m x k @@ -309,7 +309,7 @@ void SegmentMMBackwardB(const NDArray A, } } -/*! +/** * @brief Implementation of Gather_mm operator. The input matrix A is * expected to be sorted according to relation type. * @param A The input dense matrix of dimension m x k @@ -346,7 +346,7 @@ void GatherMM(const NDArray A, tot_num_rows, in_len, out_len); } -/*! +/** * @brief Implementation of Gather_mm operator. The input matrix A is * expected to be sorted according to relation type. * @param A The input dense matrix of dimension m x k diff --git a/src/array/cuda/ge_spmm.cuh b/src/array/cuda/ge_spmm.cuh index 0bd7df150ae7..863f90e97876 100644 --- a/src/array/cuda/ge_spmm.cuh +++ b/src/array/cuda/ge_spmm.cuh @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file array/cuda/ge_spmm.cuh * @brief GE-SpMM CUDA kernel function header. @@ -18,7 +18,7 @@ using namespace cuda; namespace aten { namespace cuda { -/*! +/** * @brief CUDA kernel of GE-SpMM on Csr. * @note GE-SpMM: https://arxiv.org/pdf/2007.03179.pdf * The grid dimension x and y are reordered for better performance. diff --git a/src/array/cuda/macro.cuh b/src/array/cuda/macro.cuh index d71200b74de1..ad24a9445273 100644 --- a/src/array/cuda/macro.cuh +++ b/src/array/cuda/macro.cuh @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file array/cuda/macro.cuh * @brief Macro to call SPMM/SDDMM cuda kernels. 
diff --git a/src/array/cuda/negative_sampling.cu b/src/array/cuda/negative_sampling.cu index d394332387d0..03edb8616511 100644 --- a/src/array/cuda/negative_sampling.cu +++ b/src/array/cuda/negative_sampling.cu @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2021 by Contributors * @file array/cuda/negative_sampling.cu * @brief rowwise sampling @@ -80,7 +80,7 @@ struct IsNotMinusOne { } }; -/*! +/** * @brief Sort ordered pairs in ascending order, using \a tmp_major and \a * tmp_minor as temporary buffers, each with \a n elements. */ diff --git a/src/array/cuda/rowwise_sampling.cu b/src/array/cuda/rowwise_sampling.cu index 43ea50314ba2..d9f82a3e1034 100644 --- a/src/array/cuda/rowwise_sampling.cu +++ b/src/array/cuda/rowwise_sampling.cu @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2021 by Contributors * @file array/cuda/rowwise_sampling.cu * @brief uniform rowwise sampling diff --git a/src/array/cuda/rowwise_sampling_prob.cu b/src/array/cuda/rowwise_sampling_prob.cu index 2ebffcfb7f17..141af6b90530 100644 --- a/src/array/cuda/rowwise_sampling_prob.cu +++ b/src/array/cuda/rowwise_sampling_prob.cu @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2022 by Contributors * @file array/cuda/rowwise_sampling_prob.cu * @brief weighted rowwise sampling. The degree computing kernels and diff --git a/src/array/cuda/sddmm.cu b/src/array/cuda/sddmm.cu index 04054e9bb637..1f4a2cb060f1 100644 --- a/src/array/cuda/sddmm.cu +++ b/src/array/cuda/sddmm.cu @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file array/cuda/sddmm.cu * @brief SDDMM C APIs and definitions. @@ -10,7 +10,7 @@ namespace dgl { namespace aten { -/*! +/** * @brief CUDA implementation of g-SDDMM on Csr format. */ template @@ -30,7 +30,7 @@ void SDDMMCsr(const std::string& op, } -/*! +/** * @brief CUDA implementation of g-SDDMM on Coo format. 
*/ template diff --git a/src/array/cuda/sddmm.cuh b/src/array/cuda/sddmm.cuh index 1b79160d9182..fcadcad12345 100644 --- a/src/array/cuda/sddmm.cuh +++ b/src/array/cuda/sddmm.cuh @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file array/cuda/sddmm.cuh * @brief SDDMM CUDA kernel function header. @@ -86,7 +86,7 @@ namespace cuda { constexpr unsigned int full_mask = 0xffffffff; -/*! +/** * @brief CUDA kernel of g-SDDMM on Coo format. * @note it uses edge parallel strategy, different threadblocks (on y-axis) * is responsible for the computation on different edges. Threadblocks @@ -135,7 +135,7 @@ __global__ void SDDMMCooKernel( } } -/*! +/** * @brief CUDA kernel of SDDMM-dot on Coo format, accelerated with tree reduction. * @note it uses edge parallel strategy, different threadblocks (on y-axis) * is responsible for the computation on different edges. Threadblocks @@ -203,7 +203,7 @@ __device__ __forceinline__ Idx BinarySearchSrc(const Idx *array, Idx length, Idx } } -/*! +/** * @brief CUDA kernel of g-SDDMM on Csr format. * @note it uses edge parallel strategy, different threadblocks (on y-axis) * is responsible for the computation on different edges. Threadblocks @@ -254,7 +254,7 @@ __global__ void SDDMMCsrKernel( } } -/*! +/** * @brief CUDA implementation of g-SDDMM on Coo format. * @param bcast Broadcast information. * @param coo The Coo matrix. @@ -323,7 +323,7 @@ void SDDMMCoo( } } -/*! +/** * @brief CUDA implementation of g-SDDMM on Csr format. * @param bcast Broadcast information. * @param csr The Csr matrix. diff --git a/src/array/cuda/sddmm_hetero_coo.cu b/src/array/cuda/sddmm_hetero_coo.cu index f8ad7636d4a3..a70682a14658 100644 --- a/src/array/cuda/sddmm_hetero_coo.cu +++ b/src/array/cuda/sddmm_hetero_coo.cu @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file array/cuda/sddmm.cu * @brief SDDMM C APIs and definitions. @@ -9,7 +9,7 @@ namespace dgl { namespace aten { -/*! 
+/** * @brief CUDA implementation of g-SDDMM on heterograph using Csr format. */ diff --git a/src/array/cuda/sddmm_hetero_csr.cu b/src/array/cuda/sddmm_hetero_csr.cu index 49c873e69098..daf601f813fa 100644 --- a/src/array/cuda/sddmm_hetero_csr.cu +++ b/src/array/cuda/sddmm_hetero_csr.cu @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file array/cuda/sddmm.cu * @brief SDDMM C APIs and definitions. @@ -9,7 +9,7 @@ namespace dgl { namespace aten { -/*! +/** * @brief CUDA implementation of g-SDDMM on heterograph using Csr format. */ diff --git a/src/array/cuda/segment_reduce.cu b/src/array/cuda/segment_reduce.cu index 6fdb7533a259..74f3d0603e22 100644 --- a/src/array/cuda/segment_reduce.cu +++ b/src/array/cuda/segment_reduce.cu @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file array/cuda/segment_reduce.cu * @brief Segment reduce C APIs and definitions. diff --git a/src/array/cuda/segment_reduce.cuh b/src/array/cuda/segment_reduce.cuh index 49ac5cfc38e0..8b4bfb3e87cc 100644 --- a/src/array/cuda/segment_reduce.cuh +++ b/src/array/cuda/segment_reduce.cuh @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file array/cuda/segment_reduce.cuh * @brief Segment reduce kernel function header. @@ -20,7 +20,7 @@ using namespace cuda; namespace aten { namespace cuda { -/*! +/** * @brief CUDA kernel of segment reduce. * @note each blockthread is responsible for aggregation on a row * in the result tensor. @@ -44,7 +44,7 @@ __global__ void SegmentReduceKernel( } } -/*! +/** * @brief CUDA kernel of scatter add. * @note each blockthread is responsible for adding a row in feature tensor * to a target row in output tensor. @@ -62,7 +62,7 @@ __global__ void ScatterAddKernel( } } -/*! +/** * @brief CUDA kernel to update gradients for reduce op max/min * @note each WARP (group of 32 threads) is responsible for adding a row in * feature tensor to a target row in output tensor. 
@@ -90,7 +90,7 @@ __global__ void UpdateGradMinMaxHeteroKernel( } } -/*! +/** * @brief CUDA kernel of backward phase in segment min/max. * @note each blockthread is responsible for writing a row in the * result gradient tensor by lookup the ArgMin/Max for index information. @@ -110,7 +110,7 @@ __global__ void BackwardSegmentCmpKernel( } } -/*! +/** * @brief CUDA implementation of forward phase of Segment Reduce. * @param feat The input tensor. * @param offsets The offsets tensor. @@ -141,7 +141,7 @@ void SegmentReduce(NDArray feat, NDArray offsets, NDArray out, NDArray arg) { feat_data, offsets_data, out_data, arg_data, n, dim); } -/*! +/** * @brief CUDA implementation of Scatter Add (on first dimension). * @note math equation: out[idx[i], *] += feat[i, *] * @param feat The input tensor. @@ -170,7 +170,7 @@ void ScatterAdd(NDArray feat, NDArray idx, NDArray out) { idx_data, out_data, n, dim); } -/*! +/** * @brief CUDA implementation to update gradients for reduce op max/min * @param graph The input heterogeneous graph. * @param op The binary operator, could be `copy_u`, `copy_e'. @@ -223,7 +223,7 @@ void UpdateGradMinMax_hetero( } } -/*! +/** * @brief CUDA implementation of backward phase of Segment Reduce with Min/Max * reducer. * @note math equation: out[arg[i, k], k] = feat[i, k] \param feat The input diff --git a/src/array/cuda/spmat_op_impl_coo.cu b/src/array/cuda/spmat_op_impl_coo.cu index 8ffcbdec83a5..dddcb6c01413 100644 --- a/src/array/cuda/spmat_op_impl_coo.cu +++ b/src/array/cuda/spmat_op_impl_coo.cu @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2021 by contributors. * @file array/cuda/spmat_op_impl_coo.cu * @brief COO operator GPU implementation diff --git a/src/array/cuda/spmat_op_impl_csr.cu b/src/array/cuda/spmat_op_impl_csr.cu index 8e4b2ae3ddaf..bdcd559773c1 100644 --- a/src/array/cuda/spmat_op_impl_csr.cu +++ b/src/array/cuda/spmat_op_impl_csr.cu @@ -1,4 +1,4 @@ -/*! 
+/** * Copyright (c) 2020 by Contributors * @file array/cuda/spmat_op_impl_csr.cu * @brief CSR operator CPU implementation @@ -81,7 +81,7 @@ template NDArray CSRIsNonZero(CSRMatrix, NDArray, NDArray); ///////////////////////////// CSRHasDuplicate ///////////////////////////// -/*! +/** * @brief Check whether each row does not have any duplicate entries. * Assume the CSR is sorted. */ @@ -227,7 +227,7 @@ CSRMatrix CSRSliceRows(CSRMatrix csr, int64_t start, int64_t end) { template CSRMatrix CSRSliceRows(CSRMatrix, int64_t, int64_t); template CSRMatrix CSRSliceRows(CSRMatrix, int64_t, int64_t); -/*! +/** * @brief Copy data segment to output buffers * * For the i^th row r = row[i], copy the data from indptr[r] ~ indptr[r+1] @@ -298,7 +298,7 @@ template CSRMatrix CSRSliceRows(CSRMatrix, NDArray); ///////////////////////////// CSRGetDataAndIndices ///////////////////////////// -/*! +/** * @brief Generate a 0-1 mask for each index that hits the provided (row, col) * index. * @@ -330,7 +330,7 @@ __global__ void _SegmentMaskKernel( } } -/*! +/** * @brief Search for the insertion positions for needle in the hay. * * The hay is a list of sorted elements and the result is the insertion position @@ -423,7 +423,7 @@ template std::vector CSRGetDataAndIndices( ///////////////////////////// CSRSliceMatrix ///////////////////////////// -/*! +/** * @brief Generate a 0-1 mask for each index whose column is in the provided * set. It also counts the number of masked values per row. */ diff --git a/src/array/cuda/spmm.cu b/src/array/cuda/spmm.cu index c727951ff666..bb01e5d39d53 100644 --- a/src/array/cuda/spmm.cu +++ b/src/array/cuda/spmm.cu @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file array/cuda/spmm.cu * @brief SPMM C APIs and definitions. @@ -15,7 +15,7 @@ using namespace cuda; namespace aten { -/*! +/** * @brief CUDA implementation of g-SpMM on Csr format. 
* @note use cusparse if the reduce operator is `sum` and there is * no broadcast, use dgl's kernel in other cases. @@ -80,7 +80,7 @@ void SpMMCsr(const std::string& op, const std::string& reduce, } -/*! +/** * @brief CUDA implementation of g-SpMM on Coo format. */ template diff --git a/src/array/cuda/spmm.cuh b/src/array/cuda/spmm.cuh index 0a282aab3052..dab3cc39a05b 100644 --- a/src/array/cuda/spmm.cuh +++ b/src/array/cuda/spmm.cuh @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file array/cuda/spmm.cuh * @brief SPMM CUDA kernel function header. @@ -21,7 +21,7 @@ using namespace cuda; namespace aten { -/*! +/** * @brief Determine whether cusparse SpMM function is applicable. */ template @@ -41,7 +41,7 @@ inline bool cusparse_available(bool more_nnz_than_matrix_size) { namespace { -/*! @brief Call cuBLAS geam API for transpose operation for float and double. */ +/** @brief Call cuBLAS geam API for transpose operation for float and double. */ template cublasStatus_t Xgeam(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, @@ -98,7 +98,8 @@ cublasStatus_t Xgeam(cublasHandle_t handle, cublasOperation_t transa, beta, B, ldb, C, ldc); } -/* @brief IndexSelect operator kernel implementation. +/** + * @brief IndexSelect operator kernel implementation. * @note duplicate of IndexSelectKernel defined in array_index_select.cu */ template @@ -112,7 +113,8 @@ __global__ void _IndexSelectKernel( out[i * m + j] = in[idx[i] * m + j]; } -/* @brief Transpose operator kernel implementation. +/** + * @brief Transpose operator kernel implementation. * @note not efficient but it's not a bottleneck, used for float16 dtype. */ template @@ -125,7 +127,7 @@ __global__ void _TransposeKernel( out[i * m + j] = in[j * n + i]; } -/* +/** * @brief Tranpose the input matrix. * @param row number of rows of input matrix. * @param col number of columns of input matrix. 
@@ -149,7 +151,7 @@ void _Transpose(const DType* in, DType* out, out, row)); } -/* +/** * @brief Tranpose the input matrix for data type half. * @note cuBLAS has no geam API for half data type, fallback to our kernel. */ @@ -163,7 +165,7 @@ void _Transpose(const half* in, half* out, } #if BF16_ENABLED -/* +/** * @brief Tranpose the input matrix for data type half. * @note cuBLAS has no geam API for bf16 data type, fallback to our kernel. */ @@ -177,7 +179,7 @@ void _Transpose<__nv_bfloat16>(const __nv_bfloat16* in, __nv_bfloat16* out, } #endif // BF16_ENABLED -/* +/** * @brief */ template @@ -247,7 +249,7 @@ cusparseStatus_t Xcsrmm2(cusparseHandle_t handle, cusparseOperation_t tr } #endif -/*! Cusparse implementation of SpMM on Csr format. */ +/** Cusparse implementation of SpMM on Csr format. */ template void CusparseCsrmm2( const DGLContext& ctx, @@ -347,7 +349,7 @@ void CusparseCsrmm2( device->FreeWorkspace(ctx, valptr); } -/*! Cusparse implementation of SpMM on Csr format. */ +/** Cusparse implementation of SpMM on Csr format. */ template void CusparseCsrmm2Hetero( const DGLContext& ctx, @@ -476,7 +478,7 @@ void CusparseCsrmm2Hetero( namespace cuda { -/*! +/** * @brief CUDA kernel of g-SpMM on Coo format. * @note it uses edge parallel strategy, different threadblocks (on y-axis) * is responsible for the computation on different edges. Threadblocks @@ -525,7 +527,7 @@ __global__ void SpMMCooKernel( } } -/*! +/** * @brief CUDA kernel to compute argu and arge in g-SpMM on Coo format. * @note it uses edge parallel strategy, different threadblocks (on y-axis) * is responsible for the computation on different edges. Threadblocks @@ -573,7 +575,7 @@ __global__ void ArgSpMMCooKernel( } } -/*! +/** * @brief CUDA kernel of g-SpMM on Csr format. * @note it uses node parallel strategy, different threadblocks (on y-axis) * is responsible for the computation on different destination nodes. @@ -631,7 +633,7 @@ __global__ void SpMMCsrKernel( } } -/*! 
+/** * @brief CUDA kernel of SpMM-Min/Max on Csr format. * @note it uses node parallel strategy, different threadblocks (on y-axis) * is responsible for the computation on different destination nodes. @@ -692,7 +694,7 @@ __global__ void SpMMCmpCsrHeteroKernel( } } -/*! +/** * @brief CUDA implementation of g-SpMM on Coo format. * @param bcast Broadcast information. * @param coo The Coo matrix. @@ -769,7 +771,7 @@ void SpMMCoo( }); } -/*! +/** * @brief CUDA implementation of g-SpMM on Csr format. * @param bcast Broadcast information. * @param csr The Csr matrix. @@ -824,7 +826,7 @@ void SpMMCsr( }); } -/*! +/** * @brief CUDA kernel of SpMM-Min/Max on Csr format on heterogeneous graph. * @param bcast Broadcast information. * @param csr The Csr matrix. diff --git a/src/array/cuda/spmm_hetero.cu b/src/array/cuda/spmm_hetero.cu index 21fb78308f75..c33db23e626c 100644 --- a/src/array/cuda/spmm_hetero.cu +++ b/src/array/cuda/spmm_hetero.cu @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file array/cuda/spmm.cu * @brief SPMM C APIs and definitions. @@ -15,7 +15,7 @@ using namespace cuda; namespace aten { -/*! +/** * @brief CUDA implementation of g-SpMM on Csr format. * @note use cusparse if the reduce operator is `sum` and there is * no broadcast, use dgl's kernel in other cases. diff --git a/src/array/cuda/utils.cu b/src/array/cuda/utils.cu index dbcd28b544c1..85dcea88d7d0 100644 --- a/src/array/cuda/utils.cu +++ b/src/array/cuda/utils.cu @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file array/cuda/utils.cu * @brief Utilities for CUDA kernels. diff --git a/src/array/cuda/utils.h b/src/array/cuda/utils.h index aca5f402481f..6b7d28f6a9ad 100644 --- a/src/array/cuda/utils.h +++ b/src/array/cuda/utils.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file array/cuda/utils.h * @brief Utilities for CUDA kernels. @@ -23,7 +23,7 @@ namespace cuda { #define CUDA_MAX_NUM_THREADS 256 -/*! 
@brief Calculate the number of threads needed given the dimension length. +/** @brief Calculate the number of threads needed given the dimension length. * * It finds the biggest number that is smaller than min(dim, max_nthrs) * and is also power of two. @@ -39,7 +39,7 @@ inline int FindNumThreads(int dim, int max_nthrs = CUDA_MAX_NUM_THREADS) { return ret; } -/* +/** * !\brief Find number of blocks is smaller than nblks and max_nblks * on the given axis ('x', 'y' or 'z'). */ @@ -77,7 +77,7 @@ __device__ __forceinline__ T _ldg(T* addr) { #endif } -/*! +/** * @brief Return true if the given bool flag array is all true. * The input bool array is in int8_t type so it is aligned with byte address. * @@ -88,7 +88,7 @@ __device__ __forceinline__ T _ldg(T* addr) { */ bool AllTrue(int8_t* flags, int64_t length, const DGLContext& ctx); -/*! +/** * @brief CUDA Kernel of filling the vector started from ptr of size length * with val. * @note internal use only. @@ -103,7 +103,7 @@ __global__ void _FillKernel(DType* ptr, size_t length, DType val) { } } -/*! @brief Fill the vector started from ptr of size length with val */ +/** @brief Fill the vector started from ptr of size length with val */ template void _Fill(DType* ptr, size_t length, DType val) { cudaStream_t stream = runtime::getCurrentCUDAStream(); @@ -112,7 +112,7 @@ void _Fill(DType* ptr, size_t length, DType val) { CUDA_KERNEL_CALL(cuda::_FillKernel, nb, nt, 0, stream, ptr, length, val); } -/*! +/** * @brief Search adjacency list linearly for each (row, col) pair and * write the data under the matched position in the indices array to the output. * @@ -156,7 +156,7 @@ __global__ void _LinearSearchKernel( } #if BF16_ENABLED -/*! +/** * @brief Specialization for bf16 because conversion from long long to bfloat16 * doesn't exist before SM80. */ @@ -205,7 +205,7 @@ inline DType GetCUDAScalar( return result; } -/*! 
+/** * @brief Given a sorted array and a value this function returns the index * of the first element which compares greater than value. * @@ -230,7 +230,7 @@ __device__ IdType _UpperBound(const IdType *A, int64_t n, IdType x) { return l; } -/*! +/** * @brief Given a sorted array and a value this function returns the index * of the element who is equal to val. If not exist returns n+1 * diff --git a/src/array/cuda/uvm/array_index_select_uvm.cu b/src/array/cuda/uvm/array_index_select_uvm.cu index 221c9abb9e34..07ba67672934 100644 --- a/src/array/cuda/uvm/array_index_select_uvm.cu +++ b/src/array/cuda/uvm/array_index_select_uvm.cu @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019-2022 by Contributors * @file array/cuda/uvm/array_index_select_uvm.cu * @brief Array index select GPU implementation diff --git a/src/array/cuda/uvm/array_index_select_uvm.cuh b/src/array/cuda/uvm/array_index_select_uvm.cuh index 503424f2c816..30510e438659 100644 --- a/src/array/cuda/uvm/array_index_select_uvm.cuh +++ b/src/array/cuda/uvm/array_index_select_uvm.cuh @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2021 by Contributors * @file array/cpu/array_index_select_uvm.cuh * @brief Array index select GPU kernel implementation @@ -13,9 +13,10 @@ namespace dgl { namespace aten { namespace impl { -/* This is a cross-device access version of IndexSelectMultiKernel. - * Since the memory access over PCIe is more sensitive to the - * data access aligment (cacheline), we need a separate version here. +/** + * This is a cross-device access version of IndexSelectMultiKernel. + * Since the memory access over PCIe is more sensitive to the + * data access aligment (cacheline), we need a separate version here. */ template __global__ void IndexSelectMultiKernelAligned( diff --git a/src/array/filter.cc b/src/array/filter.cc index 45a9c6b27e62..658816d60199 100644 --- a/src/array/filter.cc +++ b/src/array/filter.cc @@ -1,4 +1,4 @@ -/*! 
+/** * Copyright (c) 2021 by Contributors * @file array/filter.cc * @brief Object for selecting items in a set, or selecting items not in a set. diff --git a/src/array/filter.h b/src/array/filter.h index 1ec0fa0aef25..93b47b1b5abf 100644 --- a/src/array/filter.h +++ b/src/array/filter.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2021 by Contributors * @file array/filter.h * @brief Object for selecting items in a set, or selecting items not in a set. diff --git a/src/array/kernel.cc b/src/array/kernel.cc index 23ffd83051ce..33ad0d6efd5a 100644 --- a/src/array/kernel.cc +++ b/src/array/kernel.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file array/kernel.cc * @brief New kernels @@ -23,7 +23,7 @@ namespace { } // namespace -/*! @brief Generalized Sparse Matrix-Matrix Multiplication. */ +/** @brief Generalized Sparse Matrix-Matrix Multiplication. */ void SpMM(const std::string& op, const std::string& reduce, HeteroGraphPtr graph, NDArray ufeat, @@ -54,7 +54,7 @@ void SpMM(const std::string& op, const std::string& reduce, } -/*! @brief Generalized segmented dense Matrix-Matrix Multiplication. */ +/** @brief Generalized segmented dense Matrix-Matrix Multiplication. */ void SegmentMM(const NDArray A, const NDArray B, NDArray C, @@ -102,7 +102,7 @@ void SegmentMMBackwardB(const NDArray A, } -/*! @brief Generalized Dense Matrix-Matrix Multiplication according to relation types. */ +/** @brief Generalized Dense Matrix-Matrix Multiplication according to relation types. */ void GatherMM(const NDArray A, const NDArray B, NDArray C, @@ -139,7 +139,7 @@ void GatherMM(const NDArray A, } -/*! @brief Generalized Dense Matrix-Matrix Multiplication according to relation types. */ +/** @brief Generalized Dense Matrix-Matrix Multiplication according to relation types. */ void GatherMMScatter(const NDArray A, const NDArray B, NDArray C, @@ -179,7 +179,7 @@ void GatherMMScatter(const NDArray A, } -/*! 
@brief Generalized Sparse Matrix-Matrix Multiplication with hetero-graph support. */ +/** @brief Generalized Sparse Matrix-Matrix Multiplication with hetero-graph support. */ void SpMMHetero(const std::string& op, const std::string& reduce, HeteroGraphPtr graph, const std::vector& ufeat_vec, @@ -227,7 +227,7 @@ void SpMMHetero(const std::string& op, const std::string& reduce, } -/*! @brief Generalized Sampled Dense-Dense Matrix Multiplication. */ +/** @brief Generalized Sampled Dense-Dense Matrix Multiplication. */ void SDDMM(const std::string& op, HeteroGraphPtr graph, NDArray lhs, @@ -258,7 +258,7 @@ void SDDMM(const std::string& op, }); } -/*! +/** * @brief Find the src/dst/etype id based on the target 'u', 'v' or 'e'. * * @param graph The input graph. @@ -274,7 +274,7 @@ int get_typeid_by_target(HeteroGraphPtr graph, int target, dgl_type_t etype) { return etype; } -/*! @brief Generalized Sampled Dense-Dense Matrix Multiplication. */ +/** @brief Generalized Sampled Dense-Dense Matrix Multiplication. */ void SDDMMHetero(const std::string& op, HeteroGraphPtr graph, std::vector lhs, @@ -322,7 +322,7 @@ void SDDMMHetero(const std::string& op, } -/*! @brief Generalized Edge_softmax op for forward */ +/** @brief Generalized Edge_softmax op for forward */ void Edge_softmax_forward(const std::string& op, HeteroGraphPtr graph, NDArray ufeat, @@ -342,7 +342,7 @@ void Edge_softmax_forward(const std::string& op, } -/*! @brief Generalized Edge_softmax op for backward */ +/** @brief Generalized Edge_softmax op for backward */ void Edge_softmax_backward(const std::string& op, HeteroGraphPtr graph, NDArray out, @@ -372,7 +372,7 @@ NDArray GetEdgeMapping(HeteroGraphRef graph) { } } -/*! @brief Segment reduce dispatch function. */ +/** @brief Segment reduce dispatch function. */ void SegmentReduceDispatch(const std::string& op, NDArray feat, NDArray offsets, @@ -387,7 +387,7 @@ void SegmentReduceDispatch(const std::string& op, }); } -/*! 
@brief Scatter Add (on first dimension) dispatch function. */ +/** @brief Scatter Add (on first dimension) dispatch function. */ void ScatterAddDispatch(NDArray feat, NDArray idx, NDArray out) { ATEN_XPU_SWITCH_CUDA(feat->ctx.device_type, XPU, "ScatterAdd", { ATEN_ID_TYPE_SWITCH(idx->dtype, IdType, { @@ -398,7 +398,7 @@ void ScatterAddDispatch(NDArray feat, NDArray idx, NDArray out) { }); } -/*! @brief Update gradients (reduce op max/min) dispatch function on heterogeneous graph. */ +/** @brief Update gradients (reduce op max/min) dispatch function on heterogeneous graph. */ void UpdateGradMinMaxDispatchHetero(const HeteroGraphPtr& graph, const std::string& op, const std::vector& feat, @@ -416,7 +416,7 @@ void UpdateGradMinMaxDispatchHetero(const HeteroGraphPtr& graph, }); } -/*! @brief Backward segment cmp dispatch function.*/ +/** @brief Backward segment cmp dispatch function.*/ void BackwardSegmentCmpDispatch(NDArray feat, NDArray arg, NDArray out) { ATEN_XPU_SWITCH_CUDA(feat->ctx.device_type, XPU, "BackwardSegmentCmp", { ATEN_ID_TYPE_SWITCH(arg->dtype, IdType, { @@ -723,7 +723,7 @@ DGL_REGISTER_GLOBAL("sparse._CAPI_DGLKernelGetEdgeMapping") *rv = GetEdgeMapping(graph); }); -/*! +/** * @brief Sparse matrix multiplication with graph interface. * * @param A_ref The left operand. diff --git a/src/array/kernel_decl.h b/src/array/kernel_decl.h index a69c393ee6b3..bbea57128dc9 100644 --- a/src/array/kernel_decl.h +++ b/src/array/kernel_decl.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file array/kernel_decl.h * @brief Sparse matrix format-specific operator declarations. @@ -17,7 +17,7 @@ namespace dgl { namespace aten { -/*! +/** * @brief Generalized Sparse Matrix Dense Matrix Multiplication on Csr format. */ template @@ -29,7 +29,7 @@ void SpMMCsr(const std::string& op, const std::string& reduce, NDArray out, std::vector out_aux); -/*! +/** * @brief Generalized Sparse Matrix Dense Matrix Multiplication on Csr format with heterograph support. 
*/ @@ -43,7 +43,7 @@ void SpMMCsrHetero(const std::string& op, const std::string& reduce, std::vector>* out_aux, const std::vector& ufeat_eid, const std::vector& out_eid); -/*! +/** * @brief Generalized Sparse Matrix Dense Matrix Multiplication on Coo format. */ template @@ -55,7 +55,7 @@ void SpMMCoo(const std::string& op, const std::string& reduce, NDArray out, std::vector out_aux); -/*! +/** * @brief Generalized Sampled Dense-Dense Matrix Multiplication on Csr format. */ template @@ -67,7 +67,7 @@ void SDDMMCsr(const std::string& op, NDArray out, int lhs_target, int rhs_target); -/*! +/** * @brief Generalized Sampled Dense-Dense Matrix Multiplication on Csr format with heterograph support. */ @@ -83,7 +83,7 @@ void SDDMMCsrHetero(const std::string& op, const std::vector& ufeat_eid, const std::vector& out_eid); -/*! +/** * @brief Generalized Sampled Dense-Dense Matrix Multiplication on Coo format. */ template @@ -96,7 +96,7 @@ void SDDMMCoo(const std::string& op, int lhs_target, int rhs_target); -/*! +/** * @brief Generalized Sampled Dense-Dense Matrix Multiplication on Coo format with heterograph support. */ @@ -112,7 +112,7 @@ void SDDMMCooHetero(const std::string& op, const std::vector& lhs_eid, const std::vector& rhs_eid); -/*! +/** * @brief Generalized Dense Matrix-Matrix Multiplication according to relation types. */ template @@ -122,7 +122,7 @@ void GatherMM(const NDArray A, const NDArray idx_a, const NDArray idx_b); -/*! +/** * @brief Generalized Dense Matrix-Matrix Multiplication according to relation types. */ template @@ -133,7 +133,7 @@ void GatherMMScatter(const NDArray A, const NDArray idx_b, const NDArray idx_c); -/*! +/** * @brief Generalized segmented dense Matrix-Matrix Multiplication. */ template @@ -149,7 +149,7 @@ void SegmentMMBackwardB(const NDArray A, NDArray dB, const NDArray seglen); -/*! +/** * @brief Segment reduce. */ template @@ -159,7 +159,7 @@ void SegmentReduce(const std::string& op, NDArray out, NDArray arg); -/*! 
+/** * @brief Scatter Add on first dimension. */ template @@ -167,7 +167,7 @@ void ScatterAdd(NDArray feat, NDArray idx, NDArray out); -/*! +/** * @brief Update gradients for reduce operator max and min on first dimension. */ template @@ -178,7 +178,7 @@ void UpdateGradMinMax_hetero(const HeteroGraphPtr& g, const std::vector& idx_etype, std::vector* out); -/*! +/** * @brief Backward function of segment cmp. */ template @@ -186,7 +186,7 @@ void BackwardSegmentCmp(NDArray feat, NDArray arg, NDArray out); -/*! +/** * @brief Sparse-sparse matrix multiplication * * @param A The left operand. @@ -205,7 +205,7 @@ std::pair CSRMM( const CSRMatrix& B, NDArray B_weights); -/*! +/** * @brief Sparse-sparse matrix summation. * * @param A The sparse matrices with the same size. @@ -220,7 +220,7 @@ std::pair CSRSum( const std::vector& A, const std::vector& A_weights); -/*! +/** * @brief Edge_softmax_csr forward function on Csr format. */ template @@ -230,7 +230,7 @@ void Edge_softmax_csr_forward(const std::string& op, NDArray ufeat, NDArray efeat, NDArray out); -/*! +/** * @brief Edge_softmax_csr backward function on Csr format. */ template diff --git a/src/array/libra_partition.cc b/src/array/libra_partition.cc index 1330111ef622..5da74a23837c 100644 --- a/src/array/libra_partition.cc +++ b/src/array/libra_partition.cc @@ -1,4 +1,4 @@ -/* +/** Copyright (c) 2021 Intel Corporation \file distgnn/partition/main_Libra.py \brief Libra - Vertex-cut based graph partitioner for distirbuted training @@ -42,7 +42,7 @@ int32_t Ver2partition(IdType in_val, int64_t *node_map, int32_t num_parts) { LOG(FATAL) << "Error: Unexpected output in Ver2partition!"; } -/*! +/** * @brief Identifies the lead loaded partition/community for a given edge * assignment. */ @@ -65,7 +65,7 @@ int32_t LeastLoad(int64_t *community_edges, int32_t nc) { return loc[r]; } -/*! +/** * @brief Libra - vertexcut based graph partitioning. 
* It takes list of edges from input DGL graph and distributed them among nc * partitions During edge distribution, Libra assign a given edge to a partition @@ -324,7 +324,7 @@ DGL_REGISTER_GLOBAL("sparse._CAPI_DGLLibraVertexCut") }); }); -/*! +/** * @brief * 1. Builds dictionary (ldt) for assigning local node IDs to nodes in the * partitions @@ -456,7 +456,7 @@ DGL_REGISTER_GLOBAL("sparse._CAPI_DGLLibra2dglBuildDict") *rv = ret; }); -/*! +/** * @brief sets up the 1-level tree among the clones of the split-nodes. * @param[in] gdt_key global dict for assigning consecutive node IDs to nodes * across all the partitions @@ -506,7 +506,7 @@ DGL_REGISTER_GLOBAL("sparse._CAPI_DGLLibra2dglSetLR") Libra2dglSetLR(gdt_key, gdt_value, lrtensor, nc, Nn); }); -/*! +/** * @brief For each node in a partition, it creates a list of remote clone IDs; * also, for each node in a partition, it gathers the data (feats, label, * trian, test) from input graph. diff --git a/src/array/selector.h b/src/array/selector.h index 4e48e922cd30..1257cf18be31 100644 --- a/src/array/selector.h +++ b/src/array/selector.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file array/selector.h * @brief Selector functions to select among src/edge/dst attributes. @@ -22,7 +22,7 @@ namespace { } // namespace -/*! +/** * @brief Select among src/edge/dst feature/idx. * @note the integer argument target specifies which target * to choose, 0: src, 1: edge, 2: dst. diff --git a/src/array/union_partition.cc b/src/array/union_partition.cc index 8351cfa78ce4..42a4d6f0d07e 100644 --- a/src/array/union_partition.cc +++ b/src/array/union_partition.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file array/cpu/coo_union_partition.cc * @brief COO union and partition diff --git a/src/array/uvm_array.cc b/src/array/uvm_array.cc index b7cb93bab71d..671c4f262520 100644 --- a/src/array/uvm_array.cc +++ b/src/array/uvm_array.cc @@ -1,4 +1,4 @@ -/*! 
+/** * Copyright (c) 2019-2022 by Contributors * @file array/uvm_array.cc * @brief DGL array utilities implementation diff --git a/src/array/uvm_array_op.h b/src/array/uvm_array_op.h index 6b94b52dc84f..77ac83755788 100644 --- a/src/array/uvm_array_op.h +++ b/src/array/uvm_array_op.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019-2022 by Contributors * @file array/uvm_array_op.h * @brief Array operator templates diff --git a/src/bcast.cc b/src/bcast.cc index c6b2fc0e1b60..634a4c671c1a 100644 --- a/src/bcast.cc +++ b/src/bcast.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file kernel/bcast.h * @brief Broadcast related function implementations. @@ -11,7 +11,7 @@ namespace dgl { namespace { -/*! +/** * @brief Determine whether use broadcasting or not, given the operator * type, lhs array and rhs array. */ @@ -27,7 +27,7 @@ bool UseBcast(const std::string& op, NDArray lhs, NDArray rhs) { } // namespace -/*! +/** * @brief: Compute broadcast and auxiliary information given operator * and operands for kernel computation. * @note: Expect lhs, rhs to have ndim >= 2 and the shape of lhs/rhs diff --git a/src/c_api_common.cc b/src/c_api_common.cc index 512b1f3b352f..d0a1a716930b 100644 --- a/src/c_api_common.cc +++ b/src/c_api_common.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2018 by Contributors * @file c_runtime_api.cc * @brief DGL C API common implementations diff --git a/src/c_api_common.h b/src/c_api_common.h index 3d5d3c318db4..13508c95a37e 100644 --- a/src/c_api_common.h +++ b/src/c_api_common.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2018 by Contributors * @file c_api_common.h * @brief DGL C API common util functions @@ -25,13 +25,13 @@ typedef void* CommunicatorHandle; // KVstore message handler type typedef void* KVMsgHandle; -/*! +/** * @brief Convert a vector of NDArray to PackedFunc. */ dgl::runtime::PackedFunc ConvertNDArrayVectorToPackedFunc( const std::vector& vec); -/*! +/** * @brief Copy a vector to an NDArray. 
* * The data type of the NDArray will be IdType, which must be an integer type. diff --git a/src/geometry/cpu/geometry_op_impl.cc b/src/geometry/cpu/geometry_op_impl.cc index cc567f6d4029..4d5a2b6ea7e3 100644 --- a/src/geometry/cpu/geometry_op_impl.cc +++ b/src/geometry/cpu/geometry_op_impl.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file array/cpu/geometry_op_impl.cc * @brief Geometry operator CPU implementation @@ -16,7 +16,7 @@ using runtime::NDArray; namespace geometry { namespace impl { -/*! @brief Knuth shuffle algorithm */ +/** @brief Knuth shuffle algorithm */ template void IndexShuffle(IdType *idxs, int64_t num_elems) { for (int64_t i = num_elems - 1; i > 0; --i) { @@ -27,7 +27,7 @@ void IndexShuffle(IdType *idxs, int64_t num_elems) { template void IndexShuffle(int32_t *idxs, int64_t num_elems); template void IndexShuffle(int64_t *idxs, int64_t num_elems); -/*! @brief Groupwise index shuffle algorithm. This function will perform shuffle +/** @brief Groupwise index shuffle algorithm. This function will perform shuffle * in subarrays indicated by group index. The group index is similar to indptr * in CSRMatrix. * @@ -76,7 +76,7 @@ IdArray GroupRandomPerm( return perm; } -/*! +/** * @brief Farthest Point Sampler without the need to compute all pairs of * distance. * diff --git a/src/geometry/cuda/edge_coarsening_impl.cu b/src/geometry/cuda/edge_coarsening_impl.cu index aa934582f640..6907410c6477 100644 --- a/src/geometry/cuda/edge_coarsening_impl.cu +++ b/src/geometry/cuda/edge_coarsening_impl.cu @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file geometry/cuda/edge_coarsening_impl.cu * @brief Edge coarsening CUDA implementation @@ -109,7 +109,7 @@ __global__ void weighted_respond_kernel( } } -/*! @brief The colorize procedure. This procedure randomly marks unmarked +/** @brief The colorize procedure. 
This procedure randomly marks unmarked * nodes with BLUE(-1) and RED(-2) and checks whether the node matching * process has finished. */ @@ -137,7 +137,7 @@ bool Colorize(IdType *result_data, int64_t num_nodes, float *const prop) { return done_h; } -/*! @brief Weighted neighbor matching procedure (GPU version). +/** @brief Weighted neighbor matching procedure (GPU version). * This implementation is from `A GPU Algorithm for Greedy Graph Matching * `__ * @@ -198,7 +198,7 @@ template void WeightedNeighborMatching( template void WeightedNeighborMatching( const aten::CSRMatrix &csr, const NDArray weight, IdArray result); -/*! @brief Unweighted neighbor matching procedure (GPU version). +/** @brief Unweighted neighbor matching procedure (GPU version). * Instead of directly sample neighbors, we assign each neighbor * with a random weight. We use random weight for 2 reasons: * 1. Random sample for each node in GPU is expensive. Although diff --git a/src/geometry/cuda/geometry_op_impl.cu b/src/geometry/cuda/geometry_op_impl.cu index 94cbbf8b9461..ac3b05966322 100644 --- a/src/geometry/cuda/geometry_op_impl.cu +++ b/src/geometry/cuda/geometry_op_impl.cu @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file geometry/cuda/geometry_op_impl.cc * @brief Geometry operator CUDA implementation @@ -15,7 +15,7 @@ namespace dgl { namespace geometry { namespace impl { -/*! +/** * @brief Farthest Point Sampler without the need to compute all pairs of * distance. * diff --git a/src/geometry/geometry.cc b/src/geometry/geometry.cc index f3049d23fb79..58d5bfaacbdb 100644 --- a/src/geometry/geometry.cc +++ b/src/geometry/geometry.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file geometry/geometry.cc * @brief DGL geometry utilities implementation diff --git a/src/geometry/geometry_op.h b/src/geometry/geometry_op.h index d889a59c80c6..ede207d638df 100644 --- a/src/geometry/geometry_op.h +++ b/src/geometry/geometry_op.h @@ -1,4 +1,4 @@ -/*! 
+/** * Copyright (c) 2019 by Contributors * @file geometry/geometry_op.h * @brief Geometry operator templates @@ -17,7 +17,7 @@ void FarthestPointSampler( NDArray array, int64_t batch_size, int64_t sample_points, NDArray dist, IdArray start_idx, IdArray result); -/*! @brief Implementation of weighted neighbor matching process of edge +/** @brief Implementation of weighted neighbor matching process of edge * coarsening used in Metis and Graclus for homogeneous graph coarsening. This * procedure keeps picking an unmarked vertex and matching it with one its * unmarked neighbors (that maximizes its edge weight) until no match can be @@ -27,7 +27,7 @@ template void WeightedNeighborMatching( const aten::CSRMatrix &csr, const NDArray weight, IdArray result); -/*! @brief Implementation of neighbor matching process of edge coarsening used +/** @brief Implementation of neighbor matching process of edge coarsening used * in Metis and Graclus for homogeneous graph coarsening. This procedure keeps * picking an unmarked vertex and matching it with one its unmarked neighbors * (that maximizes its edge weight) until no match can be done. diff --git a/src/graph/creators.cc b/src/graph/creators.cc index 889bbc3e5f2d..a5d33281b841 100644 --- a/src/graph/creators.cc +++ b/src/graph/creators.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file graph/creators.cc * @brief Functions for constructing graphs. diff --git a/src/graph/gk_ops.cc b/src/graph/gk_ops.cc index caa18a434e43..fdec5de8c0f9 100644 --- a/src/graph/gk_ops.cc +++ b/src/graph/gk_ops.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file graph/gk_ops.cc * @brief Graph operation implemented in GKlib @@ -14,7 +14,7 @@ namespace dgl { #if !defined(_WIN32) -/*! +/** * Convert DGL CSR to GKLib CSR. * GKLib CSR actually stores a CSR object and a CSC object of a graph. * @param mat the DGL CSR matrix. 
@@ -60,7 +60,7 @@ gk_csr_t *Convert2GKCsr(const aten::CSRMatrix mat, bool is_row) { return gk_csr; } -/*! +/** * Convert GKLib CSR to DGL CSR. * GKLib CSR actually stores a CSR object and a CSC object of a graph. * @param gk_csr the GKLib CSR. diff --git a/src/graph/graph.cc b/src/graph/graph.cc index 007731ecb2a5..37dad8fb9ed0 100644 --- a/src/graph/graph.cc +++ b/src/graph/graph.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2018 by Contributors * @file graph/graph.cc * @brief DGL graph index implementation diff --git a/src/graph/graph_apis.cc b/src/graph/graph_apis.cc index e9b10b88773a..c369adc8f023 100644 --- a/src/graph/graph_apis.cc +++ b/src/graph/graph_apis.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2018 by Contributors * @file graph/graph.cc * @brief DGL graph index APIs diff --git a/src/graph/graph_op.cc b/src/graph/graph_op.cc index 64f983ec3176..95cedb2629bd 100644 --- a/src/graph/graph_op.cc +++ b/src/graph/graph_op.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2018 by Contributors * @file graph/graph.cc * @brief Graph operation implementation diff --git a/src/graph/graph_traversal.cc b/src/graph/graph_traversal.cc index 6fb88ac0472c..9ede071ddaf3 100644 --- a/src/graph/graph_traversal.cc +++ b/src/graph/graph_traversal.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2018 by Contributors * @file graph/traversal.cc * @brief Graph traversal implementation diff --git a/src/graph/heterograph.cc b/src/graph/heterograph.cc index 100d9b0ef27a..0a1075e7a4f5 100644 --- a/src/graph/heterograph.cc +++ b/src/graph/heterograph.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file graph/heterograph.cc * @brief Heterograph implementation diff --git a/src/graph/heterograph.h b/src/graph/heterograph.h index d33d593de40f..b9045de4a8a0 100644 --- a/src/graph/heterograph.h +++ b/src/graph/heterograph.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file graph/heterograph.h * @brief Heterograph @@ -21,7 +21,7 @@ namespace dgl { -/*! 
@brief Heterograph */ +/** @brief Heterograph */ class HeteroGraph : public BaseHeteroGraph { public: HeteroGraph( @@ -219,20 +219,20 @@ class HeteroGraph : public BaseHeteroGraph { GraphPtr AsImmutableGraph() const override; - /*! @return Load HeteroGraph from stream, using CSRMatrix*/ + /** @return Load HeteroGraph from stream, using CSRMatrix*/ bool Load(dmlc::Stream* fs); - /*! @return Save HeteroGraph to stream, using CSRMatrix */ + /** @return Save HeteroGraph to stream, using CSRMatrix */ void Save(dmlc::Stream* fs) const; - /*! @brief Convert the graph to use the given number of bits for storage */ + /** @brief Convert the graph to use the given number of bits for storage */ static HeteroGraphPtr AsNumBits(HeteroGraphPtr g, uint8_t bits); - /*! @brief Copy the data to another context */ + /** @brief Copy the data to another context */ static HeteroGraphPtr CopyTo(HeteroGraphPtr g, const DGLContext &ctx); - /*! + /** * @brief Pin all relation graphs of the current graph. * @note The graph will be pinned inplace. Behavior depends on the current context, * kDGLCPU: will be pinned; @@ -242,7 +242,7 @@ class HeteroGraph : public BaseHeteroGraph { */ void PinMemory_() override; - /*! + /** * @brief Unpin all relation graphs of the current graph. * @note The graph will be unpinned inplace. Behavior depends on the current context, * IsPinned: will be unpinned; @@ -251,13 +251,13 @@ class HeteroGraph : public BaseHeteroGraph { */ void UnpinMemory_(); - /*! + /** * @brief Record stream for this graph. * @param stream The stream that is using the graph */ void RecordStream(DGLStreamHandle stream) override; - /*! @brief Copy the data to shared memory. + /** @brief Copy the data to shared memory. 
* * Also save names of node types and edge types of the HeteroGraph object to shared memory */ @@ -265,13 +265,13 @@ class HeteroGraph : public BaseHeteroGraph { HeteroGraphPtr g, const std::string& name, const std::vector& ntypes, const std::vector& etypes, const std::set& fmts); - /*! @brief Create a heterograph from + /** @brief Create a heterograph from * \return the HeteroGraphPtr, names of node types, names of edge types */ static std::tuple, std::vector> CreateFromSharedMem(const std::string &name); - /*! @brief Creat a LineGraph of self */ + /** @brief Creat a LineGraph of self */ HeteroGraphPtr LineGraph(bool backtracking) const; const std::vector& relation_graphs() const { @@ -285,19 +285,19 @@ class HeteroGraph : public BaseHeteroGraph { // Empty Constructor, only for serializer HeteroGraph() : BaseHeteroGraph() {} - /*! @brief A map from edge type to unit graph */ + /** @brief A map from edge type to unit graph */ std::vector relation_graphs_; - /*! @brief A map from vert type to the number of verts in the type */ + /** @brief A map from vert type to the number of verts in the type */ std::vector num_verts_per_type_; - /*! @brief The shared memory object for meta info*/ + /** @brief The shared memory object for meta info*/ std::shared_ptr shared_mem_; - /*! @brief The name of the shared memory. Return empty string if it is not in shared memory. */ + /** @brief The name of the shared memory. Return empty string if it is not in shared memory. */ std::string SharedMemName() const; - /*! @brief template class for Flatten operation + /** @brief template class for Flatten operation * * @tparam IdType Graph's index data type, can be int32_t or int64_t * @param etypes vector of etypes to be falttened diff --git a/src/graph/heterograph_capi.cc b/src/graph/heterograph_capi.cc index 603ca1614ae5..93b579b00239 100644 --- a/src/graph/heterograph_capi.cc +++ b/src/graph/heterograph_capi.cc @@ -1,4 +1,4 @@ -/*! 
+/** * Copyright (c) 2020 by Contributors * @file graph/heterograph_capi.cc * @brief Heterograph CAPI bindings. diff --git a/src/graph/immutable_graph.cc b/src/graph/immutable_graph.cc index c345a779bcd8..7afb65d1fe62 100644 --- a/src/graph/immutable_graph.cc +++ b/src/graph/immutable_graph.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2018 by Contributors * @file graph/immutable_graph.cc * @brief DGL immutable graph index implementation @@ -29,7 +29,7 @@ inline std::string GetSharedMemName( return name + "_" + edge_dir; } -/* +/** * The metadata of a graph index that are needed for shared-memory graph. */ struct GraphIndexMetadata { @@ -40,7 +40,7 @@ struct GraphIndexMetadata { bool has_coo; }; -/* +/** * Serialize the metadata of a graph index and place it in a shared-memory * tensor. In this way, another process can reconstruct a GraphIndex from a * shared-memory tensor. @@ -65,7 +65,7 @@ NDArray SerializeMetadata(ImmutableGraphPtr gidx, const std::string &name) { #endif // _WIN32 } -/* +/** * Deserialize the metadata of a graph index. */ GraphIndexMetadata DeserializeMetadata(const std::string &name) { @@ -430,7 +430,7 @@ CSRPtr ImmutableGraph::GetInCSR() const { return in_csr_; } -/* !\brief Return out csr. If not exist, transpose the other one.*/ +/** @brief Return out csr. If not exist, transpose the other one.*/ CSRPtr ImmutableGraph::GetOutCSR() const { if (!out_csr_) { if (in_csr_) { @@ -447,7 +447,7 @@ CSRPtr ImmutableGraph::GetOutCSR() const { return out_csr_; } -/* !\brief Return coo. If not exist, create from csr.*/ +/** @brief Return coo. If not exist, create from csr.*/ COOPtr ImmutableGraph::GetCOO() const { if (!coo_) { if (in_csr_) { @@ -626,7 +626,7 @@ ImmutableGraphPtr ImmutableGraph::Reverse() const { constexpr uint64_t kDGLSerialize_ImGraph = 0xDD3c5FFE20046ABF; -/*! 
@return Load HeteroGraph from stream, using OutCSR Matrix*/ +/** @return Load HeteroGraph from stream, using OutCSR Matrix*/ bool ImmutableGraph::Load(dmlc::Stream *fs) { uint64_t magicNum; aten::CSRMatrix out_csr_matrix; @@ -637,7 +637,7 @@ bool ImmutableGraph::Load(dmlc::Stream *fs) { return true; } -/*! @return Save HeteroGraph to stream, using OutCSR Matrix */ +/** @return Save HeteroGraph to stream, using OutCSR Matrix */ void ImmutableGraph::Save(dmlc::Stream *fs) const { fs->Write(kDGLSerialize_ImGraph); fs->Write(GetOutCSR()); diff --git a/src/graph/metis_partition.cc b/src/graph/metis_partition.cc index 83ddde9eac8c..83cf833047ac 100644 --- a/src/graph/metis_partition.cc +++ b/src/graph/metis_partition.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file graph/metis_partition.cc * @brief Call Metis partitioning diff --git a/src/graph/network.cc b/src/graph/network.cc index a8eff24c7d1b..1716416d2b15 100644 --- a/src/graph/network.cc +++ b/src/graph/network.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2018-2022 by Contributors * @file graph/network.cc * @brief DGL networking related APIs diff --git a/src/graph/network.h b/src/graph/network.h index 8a2771334adb..7a3396e3d74d 100644 --- a/src/graph/network.h +++ b/src/graph/network.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2018 by Contributors * @file graph/network.h * @brief DGL networking related APIs @@ -21,70 +21,70 @@ using dgl::runtime::NDArray; namespace dgl { namespace network { -/*! +/** * @brief Create NDArray from raw data */ NDArray CreateNDArrayFromRaw( std::vector shape, DGLDataType dtype, DGLContext ctx, void* raw); -/*! +/** * @brief Message type for DGL distributed training */ enum MessageType { - /*! + /** * @brief Message for send/recv NodeFlow */ kNodeFlowMsg = 0, - /*! + /** * @brief Message for end-signal */ kFinalMsg = 1, - /*! + /** * @brief Initialize KVStore */ kInitMsg = 2, - /*! + /** * @brief Push msg to KVStore */ kPushMsg = 3, - /*! 
+ /** * @brief Pull msg from KVStore */ kPullMsg = 4, - /*! + /** * @brief PullBack msg from KVStore */ kPullBackMsg = 5, - /*! + /** * @brief Barrier msg for KVStore */ kBarrierMsg = 6, - /*! + /** * @brief IP and ID msg for KVStore */ kIPIDMsg = 7, - /*! + /** * @brief Get data shape msg for KVStore */ kGetShapeMsg = 8, - /*! + /** * @brief Get data shape back msg for KVStore */ kGetShapeBackMsg = 9 }; -/*! +/** * @brief Meta data for NDArray message */ class ArrayMeta { public: - /*! + /** * @brief ArrayMeta constructor. * @param msg_type type of message */ explicit ArrayMeta(int msg_type) : msg_type_(msg_type), ndarray_count_(0) {} - /*! + /** * @brief Construct ArrayMeta from binary data buffer. * @param buffer data buffer * @param size data size @@ -94,69 +94,69 @@ class ArrayMeta { this->Deserialize(buffer, size); } - /*! + /** * @return message type */ inline int msg_type() const { return msg_type_; } - /*! + /** * @return count of ndarray */ inline int ndarray_count() const { return ndarray_count_; } - /*! + /** * @brief Add NDArray meta data to ArrayMeta * @param array DGL NDArray */ void AddArray(const NDArray& array); - /*! + /** * @brief Serialize ArrayMeta to data buffer * @param size size of serialized message * @return pointer of data buffer */ char* Serialize(int64_t* size); - /*! + /** * @brief Deserialize ArrayMeta from data buffer * @param buffer data buffer * @param size size of data buffer */ void Deserialize(char* buffer, int64_t size); - /*! + /** * @brief type of message */ int msg_type_; - /*! + /** * @brief count of ndarray in MetaMsg */ int ndarray_count_; - /*! + /** * @brief DataType for each NDArray */ std::vector data_type_; - /*! + /** * @brief We first write the ndim to data_shape_ * and then write the data shape. */ std::vector data_shape_; }; -/*! +/** * @brief C structure for holding DGL KVServer message */ class KVStoreMsg { public: - /*! + /** * @brief KVStoreMsg constructor. */ KVStoreMsg() {} - /*! 
+ /** * @brief Construct KVStoreMsg from binary data buffer. * @param buffer data buffer * @param size data size @@ -165,7 +165,7 @@ class KVStoreMsg { CHECK_NOTNULL(buffer); this->Deserialize(buffer, size); } - /*! + /** * @brief Serialize KVStoreMsg to data buffer * Note that we don't serialize ID and data here. * @param size size of serialized message @@ -173,34 +173,34 @@ class KVStoreMsg { */ char* Serialize(int64_t* size); - /*! + /** * @brief Deserialize KVStoreMsg from data buffer * @param buffer data buffer * @param size size of data buffer */ void Deserialize(char* buffer, int64_t size); - /*! + /** * @brief Message type of kvstore */ int msg_type; - /*! + /** * @brief Sender's ID */ int rank; - /*! + /** * @brief data name */ std::string name; - /*! + /** * @brief data ID */ NDArray id; - /*! + /** * @brief data matrix */ NDArray data; - /*! + /** * @brief data shape */ NDArray shape; diff --git a/src/graph/nodeflow.cc b/src/graph/nodeflow.cc index 3369ab3669e4..e43af6b876f3 100644 --- a/src/graph/nodeflow.cc +++ b/src/graph/nodeflow.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file graph/nodeflow.cc * @brief DGL NodeFlow related functions. diff --git a/src/graph/pickle.cc b/src/graph/pickle.cc index de7e7cafaf3f..8d202721b4e6 100644 --- a/src/graph/pickle.cc +++ b/src/graph/pickle.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file graph/pickle.cc * @brief Functions for pickle and unpickle a graph diff --git a/src/graph/sampler.cc b/src/graph/sampler.cc index 73dcdeeddeec..21d5cfe7845a 100644 --- a/src/graph/sampler.cc +++ b/src/graph/sampler.cc @@ -1,4 +1,4 @@ -/*! 
+/** * Copyright (c) 2018 by Contributors * @file graph/sampler.cc * @brief DGL sampler implementation @@ -24,7 +24,7 @@ using namespace dgl::runtime; namespace dgl { namespace { -/* +/** * ArrayHeap is used to sample elements from vector */ template @@ -49,7 +49,7 @@ class ArrayHeap { } ~ArrayHeap() {} - /* + /** * Remove term from index (this costs O(log m) steps) */ void Delete(size_t index) { @@ -64,7 +64,7 @@ class ArrayHeap { } } - /* + /** * Add value w to index (this costs O(log m) steps) */ void Add(size_t index, ValueType w) { @@ -75,7 +75,7 @@ class ArrayHeap { } } - /* + /** * Sample from arrayHeap */ size_t Sample() { @@ -92,7 +92,7 @@ class ArrayHeap { return i - limit_; } - /* + /** * Sample a vector by given the size n */ size_t SampleWithoutReplacement(size_t n, std::vector *samples) { @@ -175,7 +175,7 @@ class EdgeSamplerObject : public Object { int64_t chunk_size_; }; -/* +/** * Uniformly sample integers from [0, set_size) without replacement. */ void RandomSample(size_t set_size, size_t num, std::vector *out) { @@ -221,7 +221,7 @@ void RandomSample( } } -/* +/** * For a sparse array whose non-zeros are represented by nz_idxs, * negate the sparse array and outputs the non-zeros in the negated array. */ @@ -244,7 +244,7 @@ void NegateArray( } } -/* +/** * Uniform sample vertices from a list of vertices. 
*/ void GetUniformSample( @@ -281,7 +281,7 @@ void GetUniformSample( } } -/* +/** * Non-uniform sample via ArrayHeap * * @param probability Transition probability on the entire graph, indexed by @@ -318,7 +318,7 @@ void GetNonUniformSample( sort(out_edge->begin(), out_edge->end()); } -/* +/** * Used for subgraph sampling */ struct neigh_list { @@ -643,7 +643,7 @@ void ConstructLayers( const std::vector &seed_array, IdArray layer_sizes, std::vector *layer_offsets, std::vector *node_mapping, std::vector *actl_layer_sizes, std::vector *probabilities) { - /* + /** * Given a graph and a collection of seed nodes, this function constructs * NodeFlow layers via uniform layer-wise sampling, and return the resultant * layers and their corresponding probabilities. @@ -705,7 +705,7 @@ void ConstructFlows( std::vector *sub_indptr, std::vector *sub_indices, std::vector *sub_eids, std::vector *flow_offsets, std::vector *edge_mapping) { - /* + /** * Given a graph and a sequence of NodeFlow layers, this function constructs * dense subgraphs (flows) between consecutive layers. */ diff --git a/src/graph/sampling/negative/global_uniform.cc b/src/graph/sampling/negative/global_uniform.cc index 7928249da34a..0a40adf1a772 100644 --- a/src/graph/sampling/negative/global_uniform.cc +++ b/src/graph/sampling/negative/global_uniform.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2021 by Contributors * @file graph/sampling/negative/global_uniform.cc * @brief Global uniform negative sampling. diff --git a/src/graph/sampling/neighbor/neighbor.cc b/src/graph/sampling/neighbor/neighbor.cc index 9dfb3b53734a..8cd023a1adcc 100644 --- a/src/graph/sampling/neighbor/neighbor.cc +++ b/src/graph/sampling/neighbor/neighbor.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020-2021 by Contributors * @file graph/sampling/neighbor.cc * @brief Definition of neighborhood-based sampler APIs. 
diff --git a/src/graph/sampling/randomwalks/frequency_hashmap.cu b/src/graph/sampling/randomwalks/frequency_hashmap.cu index 3d21c74737bf..c5d8e787fbdf 100644 --- a/src/graph/sampling/randomwalks/frequency_hashmap.cu +++ b/src/graph/sampling/randomwalks/frequency_hashmap.cu @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2021 by Contributors * @file graph/sampling/frequency_hashmap.cu * @brief frequency hashmap - used to select top-k frequency edges of each node diff --git a/src/graph/sampling/randomwalks/frequency_hashmap.cuh b/src/graph/sampling/randomwalks/frequency_hashmap.cuh index 3cd7dc6e6550..3c1a1c0649b6 100644 --- a/src/graph/sampling/randomwalks/frequency_hashmap.cuh +++ b/src/graph/sampling/randomwalks/frequency_hashmap.cuh @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2021 by Contributors * @file graph/sampling/frequency_hashmap.cuh * @brief frequency hashmap - used to select top-k frequency edges of each node diff --git a/src/graph/sampling/randomwalks/get_node_types_cpu.cc b/src/graph/sampling/randomwalks/get_node_types_cpu.cc index d754bb1878c3..0010b9698e27 100644 --- a/src/graph/sampling/randomwalks/get_node_types_cpu.cc +++ b/src/graph/sampling/randomwalks/get_node_types_cpu.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2018 by Contributors * @file graph/sampling/get_node_types_cpu.cc * @brief DGL sampler - CPU implementation of random walks with OpenMP diff --git a/src/graph/sampling/randomwalks/get_node_types_gpu.cu b/src/graph/sampling/randomwalks/get_node_types_gpu.cu index 5fe65a431c98..79e8d2596e9a 100644 --- a/src/graph/sampling/randomwalks/get_node_types_gpu.cu +++ b/src/graph/sampling/randomwalks/get_node_types_gpu.cu @@ -1,4 +1,4 @@ -/*! 
+/** * Copyright (c) 2021 by Contributors * @file graph/sampling/get_node_types_gpu.cu * @brief DGL sampler diff --git a/src/graph/sampling/randomwalks/metapath_randomwalk.h b/src/graph/sampling/randomwalks/metapath_randomwalk.h index d6865308d0f5..72fa31b280bf 100644 --- a/src/graph/sampling/randomwalks/metapath_randomwalk.h +++ b/src/graph/sampling/randomwalks/metapath_randomwalk.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2018 by Contributors * @file graph/sampler/generic_randomwalk_cpu.h * @brief DGL sampler - templated implementation definition of random walks on @@ -37,7 +37,7 @@ namespace { template using TerminatePredicate = std::function; -/*! +/** * @brief Select one successor of metapath-based random walk, given the path * generated so far. * @@ -103,7 +103,7 @@ std::tuple MetapathRandomWalkStep( return std::make_tuple(succ[idx], eid, terminate(data, curr, len)); } -/*! +/** * @brief Select one successor of metapath-based random walk, given the path * generated so far specifically for the uniform probability distribution. * @@ -154,7 +154,7 @@ std::tuple MetapathRandomWalkStepUniform( return std::make_tuple(succ[idx], eid, terminate(data, curr, len)); } -/*! +/** * @brief Metapath-based random walk. * @param hg The heterograph. * @param seeds A 1D array of seed nodes, with the type the source type of the diff --git a/src/graph/sampling/randomwalks/node2vec.cc b/src/graph/sampling/randomwalks/node2vec.cc index ca15ed7ebebf..6ac040f20b98 100644 --- a/src/graph/sampling/randomwalks/node2vec.cc +++ b/src/graph/sampling/randomwalks/node2vec.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2021 by Contributors * @file graph/sampling/node2vec.cc * @brief Dispatcher of DGL node2vec random walks diff --git a/src/graph/sampling/randomwalks/node2vec_cpu.cc b/src/graph/sampling/randomwalks/node2vec_cpu.cc index 3075582b964d..158c5d81417e 100644 --- a/src/graph/sampling/randomwalks/node2vec_cpu.cc +++ b/src/graph/sampling/randomwalks/node2vec_cpu.cc @@ -1,4 +1,4 @@ -/*! 
+/** * Copyright (c) 2021 by Contributors * @file graph/sampling/node2vec_cpu.cc * @brief DGL sampler - CPU implementation of node2vec random walk with OpenMP diff --git a/src/graph/sampling/randomwalks/node2vec_impl.h b/src/graph/sampling/randomwalks/node2vec_impl.h index fdd2feb679d1..4edda8f70fe9 100644 --- a/src/graph/sampling/randomwalks/node2vec_impl.h +++ b/src/graph/sampling/randomwalks/node2vec_impl.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2021 by Contributors * @file graph/sampling/node2vec_impl.h * @brief DGL sampler - templated implementation definition of node2vec random @@ -25,7 +25,7 @@ namespace sampling { namespace impl { -/*! +/** * @brief Node2vec random walk. * @param hg The heterograph. * @param seeds A 1D array of seed nodes, with the type the source type of the diff --git a/src/graph/sampling/randomwalks/node2vec_randomwalk.h b/src/graph/sampling/randomwalks/node2vec_randomwalk.h index 2261634a54ee..b1205320d111 100644 --- a/src/graph/sampling/randomwalks/node2vec_randomwalk.h +++ b/src/graph/sampling/randomwalks/node2vec_randomwalk.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2021 by Contributors * @file graph/sampling/node2vec_randomwalk.cc * @brief DGL sampler - CPU implementation of node2vec random walk. @@ -46,7 +46,7 @@ bool has_edge_between(const CSRMatrix &csr, dgl_id_t u, dgl_id_t v) { return std::find(u_succ, u_succ + size, v) != u_succ + size; } -/*! +/** * @brief Node2vec random walk step function * @param data The path generated so far, of type \c IdxType. * @param curr The last node ID generated. diff --git a/src/graph/sampling/randomwalks/randomwalk_cpu.cc b/src/graph/sampling/randomwalks/randomwalk_cpu.cc index 7057463f54a4..3a73fbc45b5a 100644 --- a/src/graph/sampling/randomwalks/randomwalk_cpu.cc +++ b/src/graph/sampling/randomwalks/randomwalk_cpu.cc @@ -1,4 +1,4 @@ -/*! 
+/** * Copyright (c) 2018 by Contributors * @file graph/sampling/randomwalk_cpu.cc * @brief DGL sampler - CPU implementation of metapath-based random walk with diff --git a/src/graph/sampling/randomwalks/randomwalk_gpu.cu b/src/graph/sampling/randomwalks/randomwalk_gpu.cu index 0d60eb489e58..2687bfe60396 100644 --- a/src/graph/sampling/randomwalks/randomwalk_gpu.cu +++ b/src/graph/sampling/randomwalks/randomwalk_gpu.cu @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2021-2022 by Contributors * @file graph/sampling/randomwalk_gpu.cu * @brief CUDA random walk sampleing diff --git a/src/graph/sampling/randomwalks/randomwalk_with_restart_cpu.cc b/src/graph/sampling/randomwalks/randomwalk_with_restart_cpu.cc index 1b65919a7ac8..9358dc445883 100644 --- a/src/graph/sampling/randomwalks/randomwalk_with_restart_cpu.cc +++ b/src/graph/sampling/randomwalks/randomwalk_with_restart_cpu.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2018 by Contributors * @file graph/sampling/randomwalk_with_restart_cpu.cc * @brief DGL sampler - CPU implementation of metapath-based random walk with diff --git a/src/graph/sampling/randomwalks/randomwalks.cc b/src/graph/sampling/randomwalks/randomwalks.cc index 2116f18fd9c7..b499b8f98897 100644 --- a/src/graph/sampling/randomwalks/randomwalks.cc +++ b/src/graph/sampling/randomwalks/randomwalks.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2018 by Contributors * @file graph/sampling/randomwalks.cc * @brief Dispatcher of different DGL random walks by device type diff --git a/src/graph/sampling/randomwalks/randomwalks_cpu.h b/src/graph/sampling/randomwalks/randomwalks_cpu.h index 489d7af295e7..7504876dc197 100644 --- a/src/graph/sampling/randomwalks/randomwalks_cpu.h +++ b/src/graph/sampling/randomwalks/randomwalks_cpu.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2018 by Contributors * @file graph/sampler/generic_randomwalk_cpu.h * @brief DGL sampler - templated implementation definition of random walks on @@ -28,7 +28,7 @@ namespace impl { namespace { -/*! 
+/** * @brief Generic Random Walk. * @param seeds A 1D array of seed nodes, with the type the source type of the * first edge type in the metapath. \param max_num_steps The maximum number of diff --git a/src/graph/sampling/randomwalks/randomwalks_impl.h b/src/graph/sampling/randomwalks/randomwalks_impl.h index 779a21d13433..be99d33cdcbf 100644 --- a/src/graph/sampling/randomwalks/randomwalks_impl.h +++ b/src/graph/sampling/randomwalks/randomwalks_impl.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2018 by Contributors * @file graph/sampling/randomwalks_impl.h * @brief DGL sampler - templated implementation definition of random walks @@ -24,7 +24,7 @@ namespace sampling { namespace impl { -/*! +/** * @brief Random walk step function */ template @@ -35,7 +35,7 @@ using StepFunc = std::function< dgl_id_t, // last node ID int64_t)>; // # of steps -/*! +/** * @brief Get the node types traversed by the metapath. * @return A 1D array of shape (len(metapath) + 1,) with node type IDs. */ @@ -43,7 +43,7 @@ template TypeArray GetNodeTypesFromMetapath( const HeteroGraphPtr hg, const TypeArray metapath); -/*! +/** * @brief Metapath-based random walk. * @param hg The heterograph. * @param seeds A 1D array of seed nodes, with the type the source type of the @@ -62,7 +62,7 @@ std::pair RandomWalk( const HeteroGraphPtr hg, const IdArray seeds, const TypeArray metapath, const std::vector &prob); -/*! +/** * @brief Metapath-based random walk with restart probability. * @param hg The heterograph. * @param seeds A 1D array of seed nodes, with the type the source type of the @@ -82,7 +82,7 @@ std::pair RandomWalkWithRestart( const HeteroGraphPtr hg, const IdArray seeds, const TypeArray metapath, const std::vector &prob, double restart_prob); -/*! +/** * @brief Metapath-based random walk with stepwise restart probability. Useful * for PinSAGE-like models. * @param hg The heterograph. 
diff --git a/src/graph/serialize/dglgraph_data.h b/src/graph/serialize/dglgraph_data.h index 38daa9a626ee..de0a56e15b79 100644 --- a/src/graph/serialize/dglgraph_data.h +++ b/src/graph/serialize/dglgraph_data.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file graph/serialize/dglgraph_data.h * @brief Graph serialization header @@ -56,7 +56,7 @@ class GraphData : public runtime::ObjectRef { public: DGL_DEFINE_OBJECT_REF_METHODS(GraphData, runtime::ObjectRef, GraphDataObject); - /*! @brief create a new GraphData reference */ + /** @brief create a new GraphData reference */ static GraphData Create() { return GraphData(std::make_shared()); } diff --git a/src/graph/serialize/dglgraph_serialize.cc b/src/graph/serialize/dglgraph_serialize.cc index fd35ebc7145e..56142de261f3 100644 --- a/src/graph/serialize/dglgraph_serialize.cc +++ b/src/graph/serialize/dglgraph_serialize.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file graph/serialize/graph_serialize.cc * @brief Graph serialization implementation diff --git a/src/graph/serialize/graph_serialize.cc b/src/graph/serialize/graph_serialize.cc index ec96bd16e1d1..a664d8ca516d 100644 --- a/src/graph/serialize/graph_serialize.cc +++ b/src/graph/serialize/graph_serialize.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file graph/serialize/graph_serialize.cc * @brief Graph serialization implementation diff --git a/src/graph/serialize/graph_serialize.h b/src/graph/serialize/graph_serialize.h index 08f5ea2a63d7..d8a5faea9c23 100644 --- a/src/graph/serialize/graph_serialize.h +++ b/src/graph/serialize/graph_serialize.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file graph/serialize/graph_serialize.h * @brief Graph serialization header @@ -76,7 +76,7 @@ class StorageMetaData : public runtime::ObjectRef { DGL_DEFINE_OBJECT_REF_METHODS( StorageMetaData, runtime::ObjectRef, StorageMetaDataObject); - /*! 
@brief create a new StorageMetaData reference */ + /** @brief create a new StorageMetaData reference */ static StorageMetaData Create() { return StorageMetaData(std::make_shared()); } diff --git a/src/graph/serialize/heterograph_data.h b/src/graph/serialize/heterograph_data.h index f459db9e1239..80116e253af2 100644 --- a/src/graph/serialize/heterograph_data.h +++ b/src/graph/serialize/heterograph_data.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file graph/serialize/heterograph_data.h * @brief Graph serialization header @@ -97,7 +97,7 @@ class HeteroGraphData : public runtime::ObjectRef { DGL_DEFINE_OBJECT_REF_METHODS( HeteroGraphData, runtime::ObjectRef, HeteroGraphDataObject); - /*! @brief create a new GraphData reference */ + /** @brief create a new GraphData reference */ static HeteroGraphData Create( HeteroGraphPtr gptr, List> node_tensors, List> edge_tensors, List ntype_names, @@ -106,7 +106,7 @@ class HeteroGraphData : public runtime::ObjectRef { gptr, node_tensors, edge_tensors, ntype_names, etype_names)); } - /*! @brief create an empty GraphData reference */ + /** @brief create an empty GraphData reference */ static HeteroGraphData Create() { return HeteroGraphData(std::make_shared()); } diff --git a/src/graph/serialize/heterograph_serialize.cc b/src/graph/serialize/heterograph_serialize.cc index e148ad012ddc..78774b2249c2 100644 --- a/src/graph/serialize/heterograph_serialize.cc +++ b/src/graph/serialize/heterograph_serialize.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file graph/serialize/heterograph_serialize.cc * @brief DGLHeteroGraph serialization implementation diff --git a/src/graph/serialize/tensor_serialize.cc b/src/graph/serialize/tensor_serialize.cc index ea4693c1cd1e..fcfdbf108157 100644 --- a/src/graph/serialize/tensor_serialize.cc +++ b/src/graph/serialize/tensor_serialize.cc @@ -1,4 +1,4 @@ -/*! 
+/** * Copyright (c) 2019 by Contributors * @file graph/serialize/tensor_serialize.cc * @brief Graph serialization implementation diff --git a/src/graph/serialize/zerocopy_serializer.cc b/src/graph/serialize/zerocopy_serializer.cc index 25ec702fa49d..0cec85588e7e 100644 --- a/src/graph/serialize/zerocopy_serializer.cc +++ b/src/graph/serialize/zerocopy_serializer.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020-2022 by Contributors * @file graph/serailize/zerocopy_serializer.cc * @brief serializer implementation. diff --git a/src/graph/shared_mem_manager.cc b/src/graph/shared_mem_manager.cc index 79172d7e1e32..a876a1642343 100644 --- a/src/graph/shared_mem_manager.cc +++ b/src/graph/shared_mem_manager.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2018 by Contributors * @file graph/shared_mem_manager.cc * @brief DGL sampler implementation diff --git a/src/graph/shared_mem_manager.h b/src/graph/shared_mem_manager.h index ea71958a8b28..d969b9c9477a 100644 --- a/src/graph/shared_mem_manager.h +++ b/src/graph/shared_mem_manager.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2018 by Contributors * @file graph/shared_mem_manager.cc * @brief DGL shared mem manager APIs diff --git a/src/graph/subgraph.cc b/src/graph/subgraph.cc index 796bf0740427..e7b3a4fa7600 100644 --- a/src/graph/subgraph.cc +++ b/src/graph/subgraph.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file graph/subgraph.cc * @brief Functions for extracting subgraphs. diff --git a/src/graph/transform/compact.cc b/src/graph/transform/compact.cc index 9ae57f041bf5..80c3594dbc55 100644 --- a/src/graph/transform/compact.cc +++ b/src/graph/transform/compact.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright 2019-2021 Contributors * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/graph/transform/compact.h b/src/graph/transform/compact.h index 0196b74c2cdb..48d0b9fc6f54 100644 --- a/src/graph/transform/compact.h +++ b/src/graph/transform/compact.h @@ -1,4 +1,4 @@ -/*! 
+/** * Copyright 2021 Contributors * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/graph/transform/cpu/kdtree_ndarray_adapter.h b/src/graph/transform/cpu/kdtree_ndarray_adapter.h index 286fa6f30d19..f2cb2d2015bd 100644 --- a/src/graph/transform/cpu/kdtree_ndarray_adapter.h +++ b/src/graph/transform/cpu/kdtree_ndarray_adapter.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2021 by Contributors * @file graph/transform/cpu/kdtree_ndarray_adapter.h * @brief NDArray adapter for nanoflann, without @@ -18,7 +18,7 @@ namespace dgl { namespace transform { namespace knn_utils { -/*! +/** * @brief A simple 2D NDArray adapter for nanoflann, without duplicating the * storage. * @@ -65,7 +65,7 @@ class KDTreeNDArrayAdapter { index_type* GetIndex() { return index_; } - /*! + /** * @brief Query for the \a num_closest points to a given point * Note that this is a short-cut method for GetIndex()->findNeighbors(). */ @@ -77,19 +77,19 @@ class KDTreeNDArrayAdapter { index_->findNeighbors(resultSet, query_pt, nanoflann::SearchParams()); } - /*! @brief Interface expected by KDTreeSingleIndexAdaptor */ + /** @brief Interface expected by KDTreeSingleIndexAdaptor */ const self_type& derived() const { return *this; } - /*! @brief Interface expected by KDTreeSingleIndexAdaptor */ + /** @brief Interface expected by KDTreeSingleIndexAdaptor */ self_type& derived() { return *this; } - /*! + /** * @brief Interface expected by KDTreeSingleIndexAdaptor, * return the number of data points */ size_t kdtree_get_point_count() const { return data_->shape[0]; } - /*! + /** * @brief Interface expected by KDTreeSingleIndexAdaptor, * return the dim'th component of the idx'th point */ @@ -97,7 +97,7 @@ class KDTreeNDArrayAdapter { return data_.Ptr()[idx * data_->shape[1] + dim]; } - /*! + /** * @brief Interface expected by KDTreeSingleIndexAdaptor. * Optional bounding-box computation: return false to * default to a standard bbox computation loop. 
diff --git a/src/graph/transform/cpu/knn.cc b/src/graph/transform/cpu/knn.cc index ca99830b60d3..3821be632927 100644 --- a/src/graph/transform/cpu/knn.cc +++ b/src/graph/transform/cpu/knn.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file graph/transform/cpu/knn.cc * @brief k-nearest-neighbor (KNN) implementation @@ -27,7 +27,7 @@ namespace impl { // This value is directly from pynndescent static constexpr int NN_DESCENT_BLOCK_SIZE = 16384; -/*! +/** * @brief Compute Euclidean distance between two vectors, return positive * infinite value if the intermediate distance is greater than the worst * distance. @@ -54,7 +54,7 @@ FloatType EuclideanDistWithCheck( } } -/*! @brief Compute Euclidean distance between two vectors */ +/** @brief Compute Euclidean distance between two vectors */ template FloatType EuclideanDist( const FloatType* vec1, const FloatType* vec2, int64_t dim) { @@ -67,7 +67,7 @@ FloatType EuclideanDist( return dist; } -/*! @brief Insert a new element into a heap */ +/** @brief Insert a new element into a heap */ template void HeapInsert( IdType* out, FloatType* dist, IdType new_id, FloatType new_dist, int k, @@ -104,7 +104,7 @@ void HeapInsert( } } -/*! @brief Insert a new element and its flag into heap, return 1 if insert +/** @brief Insert a new element and its flag into heap, return 1 if insert * successfully */ template int FlaggedHeapInsert( @@ -144,7 +144,7 @@ int FlaggedHeapInsert( return 1; } -/*! @brief Build heap for each point. Used by NN-descent */ +/** @brief Build heap for each point. Used by NN-descent */ template void BuildHeap(IdType* index, FloatType* dist, int k) { for (int i = k / 2 - 1; i >= 0; --i) { @@ -170,7 +170,7 @@ void BuildHeap(IdType* index, FloatType* dist, int k) { } } -/*! +/** * @brief Neighbor update process in NN-descent. The distance between * two points are computed. If this new distance is less than any worst * distance of these two points, we update the neighborhood of that point. 
@@ -208,7 +208,7 @@ int UpdateNeighbors( return num_updates; } -/*! @brief The kd-tree implementation of K-Nearest Neighbors */ +/** @brief The kd-tree implementation of K-Nearest Neighbors */ template void KdTreeKNN( const NDArray& data_points, const IdArray& data_offsets, diff --git a/src/graph/transform/cuda/cuda_compact_graph.cu b/src/graph/transform/cuda/cuda_compact_graph.cu index fa1193af2369..359da3f0d41e 100644 --- a/src/graph/transform/cuda/cuda_compact_graph.cu +++ b/src/graph/transform/cuda/cuda_compact_graph.cu @@ -1,4 +1,4 @@ -/*! +/** * Copyright 2021 Contributors * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/graph/transform/cuda/cuda_map_edges.cuh b/src/graph/transform/cuda/cuda_map_edges.cuh index b107d100a13e..99d6bb0f6d9c 100644 --- a/src/graph/transform/cuda/cuda_map_edges.cuh +++ b/src/graph/transform/cuda/cuda_map_edges.cuh @@ -1,4 +1,4 @@ -/*! +/** * Copyright 2020-2021 Contributors * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/graph/transform/cuda/cuda_to_block.cu b/src/graph/transform/cuda/cuda_to_block.cu index 8f784dac3ea6..ce46719aec50 100644 --- a/src/graph/transform/cuda/cuda_to_block.cu +++ b/src/graph/transform/cuda/cuda_to_block.cu @@ -1,4 +1,4 @@ -/*! +/** * Copyright 2020-2021 Contributors * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/graph/transform/cuda/knn.cu b/src/graph/transform/cuda/knn.cu index fe4d903c6874..8ca21fe9886b 100644 --- a/src/graph/transform/cuda/knn.cu +++ b/src/graph/transform/cuda/knn.cu @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file graph/transform/cuda/knn.cu * @brief k-nearest-neighbor (KNN) implementation (cuda) @@ -22,7 +22,7 @@ namespace dgl { namespace transform { namespace impl { -/*! +/** * @brief Utility class used to avoid linker errors with extern * unsized shared memory arrays with templated type */ @@ -54,7 +54,7 @@ struct SharedMemory { } }; -/*! 
@brief Compute Euclidean distance between two vectors in a cuda kernel */ +/** @brief Compute Euclidean distance between two vectors in a cuda kernel */ template __device__ FloatType EuclideanDist(const FloatType* vec1, const FloatType* vec2, const int64_t dim) { @@ -77,7 +77,7 @@ EuclideanDist(const FloatType* vec1, const FloatType* vec2, const int64_t dim) { return dist; } -/*! +/** * @brief Compute Euclidean distance between two vectors in a cuda kernel, * return positive infinite value if the intermediate distance is greater * than the worst distance. @@ -238,7 +238,7 @@ __device__ bool FlaggedHeapInsert( return true; } -/*! +/** * @brief Brute force kNN kernel. Compute distance for each pair of input points * and get the result directly (without a distance matrix). */ @@ -278,7 +278,7 @@ __global__ void BruteforceKnnKernel( } } -/*! +/** * @brief Same as BruteforceKnnKernel, but use shared memory as buffer. * This kernel divides query points and data points into blocks. For each * query block, it will make a loop over all data blocks and compute distances. @@ -400,7 +400,7 @@ __global__ void BruteforceKnnShareKernel( } } -/*! @brief determine the number of blocks for each segment */ +/** @brief determine the number of blocks for each segment */ template __global__ void GetNumBlockPerSegment( const IdType* offsets, IdType* out, const int64_t batch_size, @@ -411,7 +411,7 @@ __global__ void GetNumBlockPerSegment( } } -/*! @brief Get the batch index and local index in segment for each block */ +/** @brief Get the batch index and local index in segment for each block */ template __global__ void GetBlockInfo( const IdType* num_block_prefixsum, IdType* block_batch_id, @@ -429,7 +429,7 @@ __global__ void GetBlockInfo( } } -/*! +/** * @brief Brute force kNN. Compute distance for each pair of input points and * get the result directly (without a distance matrix). * @@ -472,7 +472,7 @@ void BruteForceKNNCuda( device->FreeWorkspace(ctx, dists); } -/*! 
+/** * @brief Brute force kNN with shared memory. * This function divides query points and data points into blocks. For each * query block, it will make a loop over all data blocks and compute distances. @@ -575,7 +575,7 @@ void BruteForceKNNSharedCuda( device->FreeWorkspace(ctx, block_batch_id); } -/*! @brief Setup rng state for nn-descent */ +/** @brief Setup rng state for nn-descent */ __global__ void SetupRngKernel( curandState* states, const uint64_t seed, const size_t n) { size_t id = blockIdx.x * blockDim.x + threadIdx.x; @@ -584,7 +584,7 @@ __global__ void SetupRngKernel( } } -/*! +/** * @brief Randomly initialize neighbors (sampling without replacement) * for each nodes */ @@ -636,7 +636,7 @@ __global__ void RandomInitNeighborsKernel( BuildHeap(neighbors + point_idx * k, current_dists, k); } -/*! +/** * @brief Randomly select candidates from current knn and reverse-knn graph for * nn-descent. */ @@ -735,7 +735,7 @@ __global__ void FindCandidatesKernel( } } -/*! @brief Update knn graph according to selected candidates for nn-descent */ +/** @brief Update knn graph according to selected candidates for nn-descent */ template __global__ void UpdateNeighborsKernel( const FloatType* points, const IdType* offsets, IdType* neighbors, diff --git a/src/graph/transform/knn.cc b/src/graph/transform/knn.cc index d2cce2dc7cc3..406e4f5829a8 100644 --- a/src/graph/transform/knn.cc +++ b/src/graph/transform/knn.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file graph/transform/knn.cc * @brief k-nearest-neighbor (KNN) interface diff --git a/src/graph/transform/knn.h b/src/graph/transform/knn.h index 8cd74a82a635..b2ce5c46103a 100644 --- a/src/graph/transform/knn.h +++ b/src/graph/transform/knn.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2021 by Contributors * @file graph/transform/knn.h * @brief k-nearest-neighbor (KNN) implementation @@ -14,7 +14,7 @@ namespace dgl { namespace transform { -/*! 
+/** * @brief For each point in each segment in \a query_points, find \a k nearest * points in the same segment in \a data_points. \a data_offsets and \a * query_offsets determine the start index of each segment in \a @@ -35,7 +35,7 @@ void KNN( const NDArray& query_points, const IdArray& query_offsets, const int k, IdArray result, const std::string& algorithm); -/*! +/** * @brief For each input point, find \a k approximate nearest points in the same * segment using NN-descent algorithm. * diff --git a/src/graph/transform/line_graph.cc b/src/graph/transform/line_graph.cc index b0192cdb6e79..2b47f7a59633 100644 --- a/src/graph/transform/line_graph.cc +++ b/src/graph/transform/line_graph.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file graph/transform/line_graph.cc * @brief Line graph implementation @@ -22,7 +22,7 @@ using namespace dgl::aten; namespace transform { -/*! +/** * @brief Create Line Graph. * @param hg Graph. * @param backtracking whether the pair of (v, u) (u, v) edges are treated as diff --git a/src/graph/transform/metis_partition_hetero.cc b/src/graph/transform/metis_partition_hetero.cc index 57dc09421bed..f53c5c1a4eff 100644 --- a/src/graph/transform/metis_partition_hetero.cc +++ b/src/graph/transform/metis_partition_hetero.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file graph/metis_partition.cc * @brief Call Metis partitioning diff --git a/src/graph/transform/partition_hetero.cc b/src/graph/transform/partition_hetero.cc index 8804ad0cb713..9669b8f85399 100644 --- a/src/graph/transform/partition_hetero.cc +++ b/src/graph/transform/partition_hetero.cc @@ -1,4 +1,4 @@ -/*! 
+/** * Copyright (c) 2020 by Contributors * @file graph/metis_partition.cc * @brief Call Metis partitioning diff --git a/src/graph/transform/remove_edges.cc b/src/graph/transform/remove_edges.cc index 1d8400a99895..181185d4114a 100644 --- a/src/graph/transform/remove_edges.cc +++ b/src/graph/transform/remove_edges.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file graph/transform/remove_edges.cc * @brief Remove edges. diff --git a/src/graph/transform/to_bipartite.cc b/src/graph/transform/to_bipartite.cc index cebf15bd87b2..c92fd828c41f 100644 --- a/src/graph/transform/to_bipartite.cc +++ b/src/graph/transform/to_bipartite.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright 2019-2021 Contributors * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/graph/transform/to_bipartite.h b/src/graph/transform/to_bipartite.h index 622d67f826dd..581567fba903 100644 --- a/src/graph/transform/to_bipartite.h +++ b/src/graph/transform/to_bipartite.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright 2021 Contributors * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/graph/transform/to_simple.cc b/src/graph/transform/to_simple.cc index c873ecf550c7..1f2bd5c66bc3 100644 --- a/src/graph/transform/to_simple.cc +++ b/src/graph/transform/to_simple.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file graph/transform/to_simple.cc * @brief Convert multigraphs to simple graphs diff --git a/src/graph/transform/union_partition.cc b/src/graph/transform/union_partition.cc index 8cc49b86621d..78b63c6a458f 100644 --- a/src/graph/transform/union_partition.cc +++ b/src/graph/transform/union_partition.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file graph/transform/union_partition.cc * @brief Functions for partition, union multiple graphs. 
diff --git a/src/graph/traversal.cc b/src/graph/traversal.cc index 020ac8c114f8..40c653ff3c62 100644 --- a/src/graph/traversal.cc +++ b/src/graph/traversal.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2018 by Contributors * @file graph/traversal.cc * @brief Graph traversal implementation @@ -88,7 +88,7 @@ IdArray ComputeMergedSections(const std::vector>& traces) { } // namespace -/*! +/** * @brief Class for representing frontiers. * * Each frontier is a list of nodes/edges (specified by their ids). @@ -96,13 +96,13 @@ IdArray ComputeMergedSections(const std::vector>& traces) { * value). */ struct Frontiers { - /*!\brief a vector store for the nodes/edges in all the frontiers */ + /** @brief a vector store for the nodes/edges in all the frontiers */ std::vector ids; - /*!\brief a vector store for node/edge tags. Empty if no tags are requested */ + /** @brief a vector store for node/edge tags. Empty if no tags are requested */ std::vector tags; - /*!\brief a section vector to indicate each frontier */ + /** @brief a section vector to indicate each frontier */ std::vector sections; }; diff --git a/src/graph/traversal.h b/src/graph/traversal.h index c864b8bb1d55..82d7805497da 100644 --- a/src/graph/traversal.h +++ b/src/graph/traversal.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2018 by Contributors * @file graph/traversal.h * @brief Graph traversal routines. @@ -20,7 +20,7 @@ namespace dgl { namespace traverse { -/*! +/** * @brief Traverse the graph in a breadth-first-search (BFS) order. * * The queue object must suffice following interface: @@ -81,7 +81,7 @@ void BFSNodes( } } -/*! +/** * @brief Traverse the graph in a breadth-first-search (BFS) order, returning * the edges of the BFS tree. * @@ -145,7 +145,7 @@ void BFSEdges( } } -/*! +/** * @brief Traverse the graph in topological order. * * The queue object must suffice following interface: @@ -212,13 +212,13 @@ void TopologicalNodes( } } -/*!\brief Tags for ``DFSEdges``. */ +/** @brief Tags for ``DFSEdges``. 
*/ enum DFSEdgeTag { kForward = 0, kReverse, kNonTree, }; -/*! +/** * @brief Traverse the graph in a depth-first-search (DFS) order. * * The traversal visit edges in its DFS order. Edges have three tags: diff --git a/src/graph/unit_graph.cc b/src/graph/unit_graph.cc index f2ff801da2aa..31b233dabf88 100644 --- a/src/graph/unit_graph.cc +++ b/src/graph/unit_graph.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file graph/unit_graph.cc * @brief UnitGraph graph implementation @@ -161,17 +161,17 @@ class UnitGraph::COO : public BaseHeteroGraph { } - /*! @brief Pin the adj_: COOMatrix of the COO graph. */ + /** @brief Pin the adj_: COOMatrix of the COO graph. */ void PinMemory_() { adj_.PinMemory_(); } - /*! @brief Unpin the adj_: COOMatrix of the COO graph. */ + /** @brief Unpin the adj_: COOMatrix of the COO graph. */ void UnpinMemory_() { adj_.UnpinMemory_(); } - /*! @brief Record stream for the adj_: COOMatrix of the COO graph. */ + /** @brief Record stream for the adj_: COOMatrix of the COO graph. */ void RecordStream(DGLStreamHandle stream) override { adj_.RecordStream(stream); } @@ -432,7 +432,7 @@ class UnitGraph::COO : public BaseHeteroGraph { return adj_; } - /*! + /** * @brief Determines whether the graph is "hypersparse", i.e. having significantly more * nodes than edges. */ @@ -457,7 +457,7 @@ class UnitGraph::COO : public BaseHeteroGraph { private: friend class Serializer; - /*! @brief internal adjacency matrix. Data array is empty */ + /** @brief internal adjacency matrix. Data array is empty */ aten::COOMatrix adj_; }; @@ -467,7 +467,7 @@ class UnitGraph::COO : public BaseHeteroGraph { // ////////////////////////////////////////////////////////// -/*! @brief CSR graph */ +/** @brief CSR graph */ class UnitGraph::CSR : public BaseHeteroGraph { public: CSR(GraphPtr metagraph, int64_t num_src, int64_t num_dst, @@ -571,17 +571,17 @@ class UnitGraph::CSR : public BaseHeteroGraph { } } - /*! @brief Pin the adj_: CSRMatrix of the CSR graph. 
*/ + /** @brief Pin the adj_: CSRMatrix of the CSR graph. */ void PinMemory_() { adj_.PinMemory_(); } - /*! @brief Unpin the adj_: CSRMatrix of the CSR graph. */ + /** @brief Unpin the adj_: CSRMatrix of the CSR graph. */ void UnpinMemory_() { adj_.UnpinMemory_(); } - /*! @brief Record stream for the adj_: CSRMatrix of the CSR graph. */ + /** @brief Record stream for the adj_: CSRMatrix of the CSR graph. */ void RecordStream(DGLStreamHandle stream) override { adj_.RecordStream(stream); } @@ -851,7 +851,7 @@ class UnitGraph::CSR : public BaseHeteroGraph { private: friend class Serializer; - /*! @brief internal adjacency matrix. Data array stores edge ids */ + /** @brief internal adjacency matrix. Data array stores edge ids */ aten::CSRMatrix adj_; }; @@ -1433,7 +1433,7 @@ UnitGraph::CSRPtr UnitGraph::GetInCSR(bool inplace) const { return ret; } -/* !\brief Return out csr. If not exist, transpose the other one.*/ +/** @brief Return out csr. If not exist, transpose the other one.*/ UnitGraph::CSRPtr UnitGraph::GetOutCSR(bool inplace) const { if (inplace) if (!(formats_ & CSR_CODE)) @@ -1469,7 +1469,7 @@ UnitGraph::CSRPtr UnitGraph::GetOutCSR(bool inplace) const { return ret; } -/* !\brief Return coo. If not exist, create from csr.*/ +/** @brief Return coo. If not exist, create from csr.*/ UnitGraph::COOPtr UnitGraph::GetCOO(bool inplace) const { if (inplace) if (!(formats_ & COO_CODE)) diff --git a/src/graph/unit_graph.h b/src/graph/unit_graph.h index 7a7a8b9ce2fe..265d72631e6f 100644 --- a/src/graph/unit_graph.h +++ b/src/graph/unit_graph.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file graph/unit_graph.h * @brief UnitGraph graph @@ -26,7 +26,7 @@ class HeteroGraph; class UnitGraph; typedef std::shared_ptr UnitGraphPtr; -/*! 
+/** * @brief UnitGraph graph * * UnitGraph graph is a special type of heterograph which @@ -164,7 +164,7 @@ class UnitGraph : public BaseHeteroGraph { const std::vector& eids, bool preserve_nodes = false) const override; // creators - /*! @brief Create a graph with no edges */ + /** @brief Create a graph with no edges */ static HeteroGraphPtr Empty( int64_t num_vtypes, int64_t num_src, int64_t num_dst, DGLDataType dtype, DGLContext ctx) { @@ -173,7 +173,7 @@ class UnitGraph : public BaseHeteroGraph { return CreateFromCOO(num_vtypes, num_src, num_dst, row, col); } - /*! @brief Create a graph from COO arrays */ + /** @brief Create a graph from COO arrays */ static HeteroGraphPtr CreateFromCOO( int64_t num_vtypes, int64_t num_src, int64_t num_dst, IdArray row, IdArray col, bool row_sorted = false, @@ -183,7 +183,7 @@ class UnitGraph : public BaseHeteroGraph { int64_t num_vtypes, const aten::COOMatrix& mat, dgl_format_code_t formats = ALL_CODE); - /*! @brief Create a graph from (out) CSR arrays */ + /** @brief Create a graph from (out) CSR arrays */ static HeteroGraphPtr CreateFromCSR( int64_t num_vtypes, int64_t num_src, int64_t num_dst, IdArray indptr, IdArray indices, IdArray edge_ids, @@ -193,7 +193,7 @@ class UnitGraph : public BaseHeteroGraph { int64_t num_vtypes, const aten::CSRMatrix& mat, dgl_format_code_t formats = ALL_CODE); - /*! @brief Create a graph from (in) CSC arrays */ + /** @brief Create a graph from (in) CSC arrays */ static HeteroGraphPtr CreateFromCSC( int64_t num_vtypes, int64_t num_src, int64_t num_dst, IdArray indptr, IdArray indices, IdArray edge_ids, @@ -203,13 +203,13 @@ class UnitGraph : public BaseHeteroGraph { int64_t num_vtypes, const aten::CSRMatrix& mat, dgl_format_code_t formats = ALL_CODE); - /*! @brief Convert the graph to use the given number of bits for storage */ + /** @brief Convert the graph to use the given number of bits for storage */ static HeteroGraphPtr AsNumBits(HeteroGraphPtr g, uint8_t bits); - /*! 
@brief Copy the data to another context */ + /** @brief Copy the data to another context */ static HeteroGraphPtr CopyTo(HeteroGraphPtr g, const DGLContext &ctx); - /*! + /** * @brief Pin the in_csr_, out_scr_ and coo_ of the current graph. * @note The graph will be pinned inplace. Behavior depends on the current context, * kDGLCPU: will be pinned; @@ -219,7 +219,7 @@ class UnitGraph : public BaseHeteroGraph { */ void PinMemory_() override; - /*! + /** * @brief Unpin the in_csr_, out_scr_ and coo_ of the current graph. * @note The graph will be unpinned inplace. Behavior depends on the current context, * IsPinned: will be unpinned; @@ -228,13 +228,13 @@ class UnitGraph : public BaseHeteroGraph { */ void UnpinMemory_(); - /*! + /** * @brief Record stream for this graph. * @param stream The stream that is using the graph */ void RecordStream(DGLStreamHandle stream) override; - /*! + /** * @brief Create in-edge CSR format of the unit graph. * @param inplace if true and the in-edge CSR format does not exist, the created * format will be cached in this object unless the format is restricted. @@ -242,7 +242,7 @@ class UnitGraph : public BaseHeteroGraph { */ CSRPtr GetInCSR(bool inplace = true) const; - /*! + /** * @brief Create out-edge CSR format of the unit graph. * @param inplace if true and the out-edge CSR format does not exist, the created * format will be cached in this object unless the format is restricted. @@ -250,7 +250,7 @@ class UnitGraph : public BaseHeteroGraph { */ CSRPtr GetOutCSR(bool inplace = true) const; - /*! + /** * @brief Create COO format of the unit graph. * @param inplace if true and the COO format does not exist, the created * format will be cached in this object unless the format is restricted. @@ -258,20 +258,20 @@ class UnitGraph : public BaseHeteroGraph { */ COOPtr GetCOO(bool inplace = true) const; - /*! 
@return Return the COO matrix form */ + /** @return Return the COO matrix form */ aten::COOMatrix GetCOOMatrix(dgl_type_t etype) const override; - /*! @return Return the in-edge CSC in the matrix form */ + /** @return Return the in-edge CSC in the matrix form */ aten::CSRMatrix GetCSCMatrix(dgl_type_t etype) const override; - /*! @return Return the out-edge CSR in the matrix form */ + /** @return Return the out-edge CSR in the matrix form */ aten::CSRMatrix GetCSRMatrix(dgl_type_t etype) const override; SparseFormat SelectFormat(dgl_type_t etype, dgl_format_code_t preferred_formats) const override { return SelectFormat(preferred_formats); } - /*! + /** * @brief Return the graph in the given format. Perform format conversion if the * requested format does not exist. * @@ -285,19 +285,19 @@ class UnitGraph : public BaseHeteroGraph { HeteroGraphPtr GetGraphInFormat(dgl_format_code_t formats) const override; - /*! @return Load UnitGraph from stream, using CSRMatrix*/ + /** @return Load UnitGraph from stream, using CSRMatrix*/ bool Load(dmlc::Stream* fs); - /*! @return Save UnitGraph to stream, using CSRMatrix */ + /** @return Save UnitGraph to stream, using CSRMatrix */ void Save(dmlc::Stream* fs) const; - /*! @brief Creat a LineGraph of self */ + /** @brief Creat a LineGraph of self */ HeteroGraphPtr LineGraph(bool backtracking) const; - /*! @return the reversed graph */ + /** @return the reversed graph */ UnitGraphPtr Reverse() const; - /*! @return the simpled (no-multi-edge) graph + /** @return the simpled (no-multi-edge) graph * the count recording the number of duplicated edges from the original graph. * the edge mapping from the edge IDs of original graph to those of the * returned graph. @@ -319,7 +319,7 @@ class UnitGraph : public BaseHeteroGraph { // private empty constructor UnitGraph() {} - /*! 
+ /** * @brief constructor * @param metagraph metagraph * @param in_csr in edge csr @@ -329,7 +329,7 @@ class UnitGraph : public BaseHeteroGraph { UnitGraph(GraphPtr metagraph, CSRPtr in_csr, CSRPtr out_csr, COOPtr coo, dgl_format_code_t formats = ALL_CODE); - /*! + /** * @brief constructor * @param num_vtypes number of vertex types (1 or 2) * @param metagraph metagraph @@ -350,10 +350,10 @@ class UnitGraph : public BaseHeteroGraph { bool has_coo, dgl_format_code_t formats = ALL_CODE); - /*! @return Return any existing format. */ + /** @return Return any existing format. */ HeteroGraphPtr GetAny() const; - /*! + /** * @brief Determine which format to use with a preference. * * If the storage of unit graph is "locked", i.e. no conversion is allowed, then @@ -364,24 +364,24 @@ class UnitGraph : public BaseHeteroGraph { */ SparseFormat SelectFormat(dgl_format_code_t preferred_formats) const; - /*! @return Whether the graph is hypersparse */ + /** @return Whether the graph is hypersparse */ bool IsHypersparse() const; GraphPtr AsImmutableGraph() const override; // Graph stored in different format. We use an on-demand strategy: the format is // only materialized if the operation that suitable for it is invoked. - /*! @brief CSR graph that stores reverse edges */ + /** @brief CSR graph that stores reverse edges */ CSRPtr in_csr_; - /*! @brief CSR representation */ + /** @brief CSR representation */ CSRPtr out_csr_; - /*! @brief COO representation */ + /** @brief COO representation */ COOPtr coo_; - /*! + /** * @brief Storage format restriction. */ dgl_format_code_t formats_; - /*! @brief which streams have recorded the graph */ + /** @brief which streams have recorded the graph */ std::vector recorded_streams; }; diff --git a/src/partition/cuda/partition_op.cu b/src/partition/cuda/partition_op.cu index 052cd1642b72..21c32d40a599 100644 --- a/src/partition/cuda/partition_op.cu +++ b/src/partition/cuda/partition_op.cu @@ -1,4 +1,4 @@ -/*! 
+/** * Copyright (c) 2021 by Contributors * @file ndarray_partition.h * @brief Operations on partition implemented in CUDA. diff --git a/src/partition/ndarray_partition.cc b/src/partition/ndarray_partition.cc index bbd417765ec7..a4a039ed455e 100644 --- a/src/partition/ndarray_partition.cc +++ b/src/partition/ndarray_partition.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2021 by Contributors * @file ndarray_partition.cc * @brief DGL utilities for working with the partitioned NDArrays diff --git a/src/partition/ndarray_partition.h b/src/partition/ndarray_partition.h index 670ed6c15f2f..2ae36ff74c64 100644 --- a/src/partition/ndarray_partition.h +++ b/src/partition/ndarray_partition.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2021 by Contributors * @file ndarray_partition.h * @brief DGL utilities for working with the partitioned NDArrays diff --git a/src/partition/partition_op.h b/src/partition/partition_op.h index e5a629c9a3bf..3b251528699d 100644 --- a/src/partition/partition_op.h +++ b/src/partition/partition_op.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2021 by Contributors * @file ndarray_partition.h * @brief DGL utilities for working with the partitioned NDArrays diff --git a/src/random/cpu/choice.cc b/src/random/cpu/choice.cc index b1042a944bb9..4629962a2f46 100644 --- a/src/random/cpu/choice.cc +++ b/src/random/cpu/choice.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file random/choice.cc * @brief Non-uniform discrete sampling implementation diff --git a/src/random/cpu/sample_utils.h b/src/random/cpu/sample_utils.h index 2e233d6c39af..745086060184 100644 --- a/src/random/cpu/sample_utils.h +++ b/src/random/cpu/sample_utils.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file dgl/sample_utils.h * @brief Sampling utilities @@ -20,12 +20,12 @@ namespace dgl { namespace utils { -/*! @brief Base sampler class */ +/** @brief Base sampler class */ template class BaseSampler { public: virtual ~BaseSampler() = default; - /*! 
@brief Draw one integer sample */ + /** @brief Draw one integer sample */ virtual Idx Draw() { LOG(INFO) << "Not implemented yet."; return 0; @@ -37,7 +37,7 @@ class BaseSampler { // probability 0. DType could be uint8 in this case, which will give incorrect arithmetic // results due to overflowing and/or integer division. -/* +/** * AliasSampler is used to sample elements from a given discrete categorical distribution. * Algorithm: Alias Method(https://en.wikipedia.org/wiki/Alias_method) * Sampler building complexity: O(n) @@ -165,7 +165,7 @@ class AliasSampler: public BaseSampler { }; -/* +/** * CDFSampler is used to sample elements from a given discrete categorical distribution. * Algorithm: create a cumulative distribution function and conduct binary search for sampling. * Reference: https://github.com/numpy/numpy/blob/d37908/numpy/random/mtrand.pyx#L804 @@ -256,7 +256,7 @@ class CDFSampler: public BaseSampler { }; -/* +/** * TreeSampler is used to sample elements from a given discrete categorical distribution. * Algorithm: create a heap that stores accumulated likelihood of its leaf descendents. * Reference: https://blog.smola.org/post/1016514759 diff --git a/src/random/random.cc b/src/random/random.cc index 43b6786666a1..f07b995bee41 100644 --- a/src/random/random.cc +++ b/src/random/random.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2017 by Contributors * @file random.cc * @brief Random number generator interfaces diff --git a/src/rpc/net_type.h b/src/rpc/net_type.h index 73d99008c72c..7c1686df151f 100644 --- a/src/rpc/net_type.h +++ b/src/rpc/net_type.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2022 by Contributors * @file net_type.h * @brief Base communicator for DGL distributed training. @@ -14,21 +14,21 @@ namespace dgl { namespace rpc { struct RPCBase { - /*! + /** * @brief Finalize Receiver * * Finalize() is not thread-safe and only one thread can invoke this API. */ virtual void Finalize() = 0; - /*! 
+ /** * @brief Communicator type: 'socket', 'tensorpipe', etc */ virtual const std::string &NetType() const = 0; }; struct RPCSender : RPCBase { - /*! + /** * @brief Connect to a receiver. * * When there are multiple receivers to be connected, application will call @@ -44,7 +44,7 @@ struct RPCSender : RPCBase { */ virtual bool ConnectReceiver(const std::string &addr, int recv_id) = 0; - /*! + /** * @brief Finalize the action to connect to receivers. Make sure that either * all connections are successfully established or connection fails. * @return True for success and False for fail @@ -53,7 +53,7 @@ struct RPCSender : RPCBase { */ virtual bool ConnectReceiverFinalize(const int max_try_times) { return true; } - /*! + /** * @brief Send RPCMessage to specified Receiver. * @param msg data message * @param recv_id receiver's ID @@ -62,7 +62,7 @@ struct RPCSender : RPCBase { }; struct RPCReceiver : RPCBase { - /*! + /** * @brief Wait for all the Senders to connect * @param addr Networking address, e.g., 'tcp://127.0.0.1:50051', 'mpi://0' * @param num_sender total number of Senders @@ -74,7 +74,7 @@ struct RPCReceiver : RPCBase { virtual bool Wait( const std::string &addr, int num_sender, bool blocking = true) = 0; - /*! + /** * @brief Recv RPCMessage from Sender. Actually removing data from queue. * @param msg pointer of RPCmessage * @param timeout The timeout value in milliseconds. If zero, wait diff --git a/src/rpc/network/common.cc b/src/rpc/network/common.cc index 91d9a5c80215..fda5e78859df 100644 --- a/src/rpc/network/common.cc +++ b/src/rpc/network/common.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file common.cc * @brief This file provide basic facilities for string diff --git a/src/rpc/network/common.h b/src/rpc/network/common.h index 969676a89979..38131389c010 100644 --- a/src/rpc/network/common.h +++ b/src/rpc/network/common.h @@ -1,4 +1,4 @@ -/*! 
+/** * Copyright (c) 2019 by Contributors * @file common.h * @brief This file provide basic facilities for string diff --git a/src/rpc/network/communicator.h b/src/rpc/network/communicator.h index c5715fc5815b..642fc8531731 100644 --- a/src/rpc/network/communicator.h +++ b/src/rpc/network/communicator.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file communicator.h * @brief Communicator for DGL distributed training. @@ -16,7 +16,7 @@ namespace dgl { namespace network { -/*! +/** * @brief Network Sender for DGL distributed training. * * Sender is an abstract class that defines a set of APIs for sending binary @@ -27,7 +27,7 @@ namespace network { */ class Sender : public rpc::RPCSender { public: - /*! + /** * @brief Sender constructor * @param queue_size size (bytes) of message queue. * @param max_thread_count size of thread pool. 0 for no limit @@ -42,7 +42,7 @@ class Sender : public rpc::RPCSender { virtual ~Sender() {} - /*! + /** * @brief Send data to specified Receiver. * @param msg data message * @param recv_id receiver's ID @@ -58,17 +58,17 @@ class Sender : public rpc::RPCSender { virtual STATUS Send(Message msg, int recv_id) = 0; protected: - /*! + /** * @brief Size of message queue */ int64_t queue_size_; - /*! + /** * @brief Size of thread pool. 0 for no limit */ int max_thread_count_; }; -/*! +/** * @brief Network Receiver for DGL distributed training. * * Receiver is an abstract class that defines a set of APIs for receiving binary @@ -79,7 +79,7 @@ class Sender : public rpc::RPCSender { */ class Receiver : public rpc::RPCReceiver { public: - /*! + /** * @brief Receiver constructor * @param queue_size size of message queue. * @param max_thread_count size of thread pool. 0 for no limit @@ -96,7 +96,7 @@ class Receiver : public rpc::RPCReceiver { virtual ~Receiver() {} - /*! 
+ /** * @brief Recv data from Sender * @param msg pointer of data message * @param send_id which sender current msg comes from @@ -110,7 +110,7 @@ class Receiver : public rpc::RPCReceiver { */ virtual STATUS Recv(Message* msg, int* send_id, int timeout = 0) = 0; - /*! + /** * @brief Recv data from a specified Sender * @param msg pointer of data message * @param send_id sender's ID @@ -125,11 +125,11 @@ class Receiver : public rpc::RPCReceiver { virtual STATUS RecvFrom(Message* msg, int send_id, int timeout = 0) = 0; protected: - /*! + /** * @brief Size of message queue */ int64_t queue_size_; - /*! + /** * @brief Size of thread pool. 0 for no limit */ int max_thread_count_; diff --git a/src/rpc/network/msg_queue.cc b/src/rpc/network/msg_queue.cc index 228e3816697d..6785fb5fd937 100644 --- a/src/rpc/network/msg_queue.cc +++ b/src/rpc/network/msg_queue.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file msg_queue.cc * @brief Message queue for DGL distributed training. diff --git a/src/rpc/network/msg_queue.h b/src/rpc/network/msg_queue.h index 14d086322f0b..e4d0764f8da5 100644 --- a/src/rpc/network/msg_queue.h +++ b/src/rpc/network/msg_queue.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file msg_queue.h * @brief Message queue for DGL distributed training. @@ -22,7 +22,7 @@ namespace network { typedef int STATUS; -/*! +/** * @brief Status code of message queue */ #define ADD_SUCCESS 3400 // Add message successfully @@ -33,45 +33,45 @@ typedef int STATUS; #define REMOVE_SUCCESS 3405 // Remove message successfully #define QUEUE_EMPTY 3406 // Cannot remove when queue is empty -/*! +/** * @brief Message used by network communicator and message queue. */ struct Message { - /*! + /** * @brief Constructor */ Message() {} - /*! + /** * @brief Constructor */ Message(char* data_ptr, int64_t data_size) : data(data_ptr), size(data_size) {} - /*! + /** * @brief message data */ char* data; - /*! 
+ /** * @brief message size in bytes */ int64_t size; - /*! + /** * @brief message receiver id */ int receiver_id = -1; - /*! + /** * @brief user-defined deallocator, which can be nullptr */ std::function deallocator = nullptr; }; -/*! +/** * @brief Free memory buffer of message */ inline void DefaultMessageDeleter(Message* msg) { delete[] msg->data; } -/*! +/** * @brief Message Queue for network communication. * * MessageQueue is FIFO queue that adopts producer/consumer model for data @@ -89,7 +89,7 @@ inline void DefaultMessageDeleter(Message* msg) { delete[] msg->data; } */ class MessageQueue { public: - /*! + /** * @brief MessageQueue constructor * @param queue_size size (bytes) of message queue * @param num_producers number of producers, use 1 by default @@ -97,12 +97,12 @@ class MessageQueue { explicit MessageQueue( int64_t queue_size /* in bytes */, int num_producers = 1); - /*! + /** * @brief MessageQueue deconstructor */ ~MessageQueue() {} - /*! + /** * @brief Add message to the queue * @param msg data message * @param is_blocking Blocking if cannot add, else return @@ -110,7 +110,7 @@ class MessageQueue { */ STATUS Add(Message msg, bool is_blocking = true); - /*! + /** * @brief Remove message from the queue * @param msg pointer of data msg * @param is_blocking Blocking if cannot remove, else return @@ -118,64 +118,64 @@ class MessageQueue { */ STATUS Remove(Message* msg, bool is_blocking = true); - /*! + /** * @brief Signal that producer producer_id will no longer produce anything * @param producer_id An integer uniquely to identify a producer thread */ void SignalFinished(int producer_id); - /*! + /** * @return true if queue is empty. */ bool Empty() const; - /*! + /** * @return true if queue is empty and all num_producers have signaled. */ bool EmptyAndNoMoreAdd() const; protected: - /*! + /** * @brief message queue */ std::queue queue_; - /*! + /** * @brief Size of the queue in bytes */ int64_t queue_size_; - /*! 
+ /** * @brief Free size of the queue */ int64_t free_size_; - /*! + /** * @brief Used to check all producers will no longer produce anything */ size_t num_producers_; - /*! + /** * @brief Store finished producer id */ std::set finished_producers_; - /*! + /** * @brief Condition when consumer should wait */ std::condition_variable cond_not_full_; - /*! + /** * @brief Condition when producer should wait */ std::condition_variable cond_not_empty_; - /*! + /** * @brief Signal for exit wait */ std::atomic exit_flag_{false}; - /*! + /** * @brief Protect all above data and conditions */ mutable std::mutex mutex_; diff --git a/src/rpc/network/socket_communicator.cc b/src/rpc/network/socket_communicator.cc index d9f9ed965c73..247147828bdd 100644 --- a/src/rpc/network/socket_communicator.cc +++ b/src/rpc/network/socket_communicator.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file communicator.cc * @brief SocketCommunicator for DGL distributed training. diff --git a/src/rpc/network/socket_communicator.h b/src/rpc/network/socket_communicator.h index b5113d2715c1..85919a0be877 100644 --- a/src/rpc/network/socket_communicator.h +++ b/src/rpc/network/socket_communicator.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file communicator.h * @brief SocketCommunicator for DGL distributed training. @@ -25,7 +25,7 @@ static constexpr int kTimeOut = 10 * 60; // 10 minutes (in seconds) for socket timeout static constexpr int kMaxConnection = 1024; // maximal connection: 1024 -/*! +/** * @breif Networking address */ struct IPAddr { @@ -33,14 +33,14 @@ struct IPAddr { int port; }; -/*! +/** * @brief SocketSender for DGL distributed training. * * SocketSender is the communicator implemented by tcp socket. */ class SocketSender : public Sender { public: - /*! + /** * @brief Sender constructor * @param queue_size size of message queue * @param max_thread_count size of thread pool. 
0 for no limit @@ -48,7 +48,7 @@ class SocketSender : public Sender { SocketSender(int64_t queue_size, int max_thread_count) : Sender(queue_size, max_thread_count) {} - /*! + /** * @brief Connect to a receiver. * * When there are multiple receivers to be connected, application will call @@ -64,7 +64,7 @@ class SocketSender : public Sender { */ bool ConnectReceiver(const std::string& addr, int recv_id) override; - /*! + /** * @brief Finalize the action to connect to receivers. Make sure that either * all connections are successfully established or connection fails. * @return True for success and False for fail @@ -73,19 +73,19 @@ class SocketSender : public Sender { */ bool ConnectReceiverFinalize(const int max_try_times) override; - /*! + /** * @brief Send RPCMessage to specified Receiver. * @param msg data message * @param recv_id receiver's ID */ void Send(const rpc::RPCMessage& msg, int recv_id) override; - /*! + /** * @brief Finalize TPSender */ void Finalize() override; - /*! + /** * @brief Communicator type: 'socket' */ const std::string& NetType() const override { @@ -93,7 +93,7 @@ class SocketSender : public Sender { return net_type; } - /*! + /** * @brief Send data to specified Receiver. Actually pushing message to message * queue. * @param msg data message. @@ -110,29 +110,29 @@ class SocketSender : public Sender { STATUS Send(Message msg, int recv_id) override; private: - /*! + /** * @brief socket for each connection of receiver */ std::vector< std::unordered_map>> sockets_; - /*! + /** * @brief receivers' address */ std::unordered_map receiver_addrs_; - /*! + /** * @brief message queue for each thread */ std::vector> msg_queue_; - /*! + /** * @brief Independent thread */ std::vector> threads_; - /*! + /** * @brief Send-loop for each thread * @param sockets TCPSockets for current thread * @param queue message_queue for current thread @@ -147,14 +147,14 @@ class SocketSender : public Sender { std::shared_ptr queue); }; -/*! 
+/** * @brief SocketReceiver for DGL distributed training. * * SocketReceiver is the communicator implemented by tcp socket. */ class SocketReceiver : public Receiver { public: - /*! + /** * @brief Receiver constructor * @param queue_size size of message queue. * @param max_thread_count size of thread pool. 0 for no limit @@ -162,7 +162,7 @@ class SocketReceiver : public Receiver { SocketReceiver(int64_t queue_size, int max_thread_count) : Receiver(queue_size, max_thread_count) {} - /*! + /** * @brief Wait for all the Senders to connect * @param addr Networking address, e.g., 'tcp://127.0.0.1:50051', 'mpi://0' * @param num_sender total number of Senders @@ -174,7 +174,7 @@ class SocketReceiver : public Receiver { bool Wait( const std::string& addr, int num_sender, bool blocking = true) override; - /*! + /** * @brief Recv RPCMessage from Sender. Actually removing data from queue. * @param msg pointer of RPCmessage * @param timeout The timeout value in milliseconds. If zero, wait @@ -183,7 +183,7 @@ class SocketReceiver : public Receiver { */ rpc::RPCStatus Recv(rpc::RPCMessage* msg, int timeout) override; - /*! + /** * @brief Recv data from Sender. Actually removing data from msg_queue. * @param msg pointer of data message * @param send_id which sender current msg comes from @@ -197,7 +197,7 @@ class SocketReceiver : public Receiver { */ STATUS Recv(Message* msg, int* send_id, int timeout = 0) override; - /*! + /** * @brief Recv data from a specified Sender. Actually removing data from * msg_queue. * @param msg pointer of data message. @@ -212,14 +212,14 @@ class SocketReceiver : public Receiver { */ STATUS RecvFrom(Message* msg, int send_id, int timeout = 0) override; - /*! + /** * @brief Finalize SocketReceiver * * Finalize() is not thread-safe and only one thread can invoke this API. */ void Finalize() override; - /*! 
+ /** * @brief Communicator type: 'socket' */ const std::string& NetType() const override { @@ -233,24 +233,24 @@ class SocketReceiver : public Receiver { int64_t received_bytes = 0; char* buffer = nullptr; }; - /*! + /** * @brief number of sender */ int num_sender_; - /*! + /** * @brief server socket for listening connections */ TCPSocket* server_socket_; - /*! + /** * @brief socket for each client connections */ std::vector>> sockets_; - /*! + /** * @brief Message queue for each socket connection */ std::unordered_map< @@ -258,18 +258,18 @@ class SocketReceiver : public Receiver { msg_queue_; std::unordered_map>::iterator mq_iter_; - /*! + /** * @brief Independent thead */ std::vector> threads_; - /*! + /** * @brief queue_sem_ semphore to indicate number of messages in multiple * message queues to prevent busy wait of Recv */ runtime::Semaphore queue_sem_; - /*! + /** * @brief Recv-loop for each thread * @param sockets client sockets of current thread * @param queue message queues of current thread diff --git a/src/rpc/network/socket_pool.cc b/src/rpc/network/socket_pool.cc index 8272706525d2..a9642c9f413e 100644 --- a/src/rpc/network/socket_pool.cc +++ b/src/rpc/network/socket_pool.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2021 by Contributors * @file socket_pool.cc * @brief Socket pool of nonblocking sockets for DGL distributed training. diff --git a/src/rpc/network/socket_pool.h b/src/rpc/network/socket_pool.h index 616a1d825775..61c6bd12120d 100644 --- a/src/rpc/network/socket_pool.h +++ b/src/rpc/network/socket_pool.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2021 by Contributors * @file socket_pool.h * @brief Socket pool of nonblocking sockets for DGL distributed training. @@ -15,7 +15,7 @@ namespace network { class TCPSocket; -/*! +/** * @brief SocketPool maintains a group of nonblocking sockets, and can provide * active sockets. 
* Currently SocketPool is based on epoll, a scalable I/O event notification @@ -23,20 +23,20 @@ class TCPSocket; */ class SocketPool { public: - /*! + /** * @brief socket mode read/receive */ static const int READ = 1; - /*! + /** * @brief socket mode write/send */ static const int WRITE = 2; - /*! + /** * @brief SocketPool constructor */ SocketPool(); - /*! + /** * @brief Add a socket to SocketPool * @param socket tcp socket to add * @param socket_id receiver/sender id of the socket @@ -45,19 +45,19 @@ class SocketPool { void AddSocket( std::shared_ptr socket, int socket_id, int events = READ); - /*! + /** * @brief Remove socket from SocketPool * @param socket tcp socket to remove * @return number of remaing sockets in the pool */ size_t RemoveSocket(std::shared_ptr socket); - /*! + /** * @brief SocketPool destructor */ ~SocketPool(); - /*! + /** * @brief Get current active socket. This is a blocking method * @param socket_id output parameter of the socket_id of active socket * @return active TCPSocket @@ -65,27 +65,27 @@ class SocketPool { std::shared_ptr GetActiveSocket(int* socket_id); private: - /*! + /** * @brief Wait for event notification */ void Wait(); - /*! + /** * @brief map from fd to TCPSocket */ std::unordered_map> tcp_sockets_; - /*! + /** * @brief map from fd to socket_id */ std::unordered_map socket_ids_; - /*! + /** * @brief fd for epoll base */ int epfd_; - /*! + /** * @brief queue for current active fds */ std::queue pending_fds_; diff --git a/src/rpc/network/tcp_socket.cc b/src/rpc/network/tcp_socket.cc index 10f734dd85ee..563bb9c34ab4 100644 --- a/src/rpc/network/tcp_socket.cc +++ b/src/rpc/network/tcp_socket.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file tcp_socket.cc * @brief TCP socket for DGL distributed training. 
diff --git a/src/rpc/network/tcp_socket.h b/src/rpc/network/tcp_socket.h index b34174aeb037..53ac78bd3324 100644 --- a/src/rpc/network/tcp_socket.h +++ b/src/rpc/network/tcp_socket.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file tcp_socket.h * @brief TCP socket for DGL distributed training. @@ -19,23 +19,23 @@ namespace dgl { namespace network { -/*! +/** * @brief TCPSocket is a simple wrapper around a socket. * It supports only TCP connections. */ class TCPSocket { public: - /*! + /** * @brief TCPSocket constructor */ TCPSocket(); - /*! + /** * @brief TCPSocket deconstructor */ ~TCPSocket(); - /*! + /** * @brief Connect to a given server address * @param ip ip address * @param port end port @@ -43,7 +43,7 @@ class TCPSocket { */ bool Connect(const char* ip, int port); - /*! + /** * @brief Bind on the given IP and PORT * @param ip ip address * @param port end port @@ -51,14 +51,14 @@ class TCPSocket { */ bool Bind(const char* ip, int port); - /*! + /** * @brief listen for remote connection * @param max_connection maximal connection * @return true for success and false for failure */ bool Listen(int max_connection); - /*! + /** * @brief wait doe a new connection * @param socket new SOCKET will be stored to socket * @param ip_client new IP will be stored to ip_client @@ -67,7 +67,7 @@ class TCPSocket { */ bool Accept(TCPSocket* socket, std::string* ip_client, int* port_client); - /*! + /** * @brief SetNonBlocking() is needed refering to this example of epoll: * http://www.kernel.org/doc/man-pages/online/pages/man4/epoll.4.html * @param flag true for nonblocking, false for blocking @@ -75,13 +75,13 @@ class TCPSocket { */ bool SetNonBlocking(bool flag); - /*! + /** * @brief Set timeout for socket * @param timeout seconds timeout */ void SetTimeout(int timeout); - /*! + /** * @brief Shut down one or both halves of the connection. * @param ways ways for shutdown * If ways is SHUT_RD, further receives are disallowed. 
@@ -91,12 +91,12 @@ class TCPSocket { */ bool ShutDown(int ways); - /*! + /** * @brief close socket. */ void Close(); - /*! + /** * @brief Send data. * @param data data for sending * @param len_data length of data @@ -104,7 +104,7 @@ class TCPSocket { */ int64_t Send(const char* data, int64_t len_data); - /*! + /** * @brief Receive data. * @param buffer buffer for receving * @param size_buffer size of buffer @@ -112,14 +112,14 @@ class TCPSocket { */ int64_t Receive(char* buffer, int64_t size_buffer); - /*! + /** * @brief Get socket's file descriptor * @return socket's file descriptor */ int Socket() const; private: - /*! + /** * @brief socket's file descriptor */ int socket_; diff --git a/src/rpc/rpc.cc b/src/rpc/rpc.cc index 9f0a2648639c..6b9beb888671 100644 --- a/src/rpc/rpc.cc +++ b/src/rpc/rpc.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file rpc/rpc.cc * @brief Implementation of RPC utilities used by both server and client sides. @@ -382,7 +382,7 @@ DGL_REGISTER_GLOBAL("distributed.rpc._CAPI_DGLRPCMessageGetTensors") }); #if defined(__linux__) -/*! +/** * @brief The signal handler. * @param s signal */ diff --git a/src/rpc/rpc.h b/src/rpc/rpc.h index abc38260c980..6b59cfdd08ad 100644 --- a/src/rpc/rpc.h +++ b/src/rpc/rpc.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file rpc/rpc.h * @brief Common headers for remote process call (RPC). @@ -34,9 +34,9 @@ struct RPCContext; // Communicator handler type typedef void* CommunicatorHandle; -/*! @brief Context information for RPC communication */ +/** @brief Context information for RPC communication */ struct RPCContext { - /*! + /** * @brief Rank of this process. * * If the process is a client, this is equal to client ID. Otherwise, the @@ -44,57 +44,57 @@ struct RPCContext { */ int32_t rank = -1; - /*! + /** * @brief Cuurent machine ID */ int32_t machine_id = -1; - /*! + /** * @brief Total number of machines. */ int32_t num_machines = 0; - /*! 
+ /** * @brief Message sequence number. */ std::atomic msg_seq{0}; - /*! + /** * @brief Total number of server. */ int32_t num_servers = 0; - /*! + /** * @brief Total number of client. */ int32_t num_clients = 0; - /*! + /** * @brief Current barrier count */ std::unordered_map barrier_count; - /*! + /** * @brief Total number of server per machine. */ int32_t num_servers_per_machine = 0; - /*! + /** * @brief Sender communicator. */ std::shared_ptr sender; - /*! + /** * @brief Receiver communicator. */ std::shared_ptr receiver; - /*! + /** * @brief Tensorpipe global context */ std::shared_ptr ctx; - /*! + /** * @brief Server state data. * * If the process is a server, this stores necessary @@ -105,20 +105,20 @@ struct RPCContext { */ std::shared_ptr server_state; - /*! + /** * @brief Cuurent group ID */ int32_t group_id = -1; int32_t curr_client_id = -1; std::unordered_map> clients_; - /*! @brief Get the RPC context singleton */ + /** @brief Get the RPC context singleton */ static RPCContext* getInstance() { static RPCContext ctx; return &ctx; } - /*! @brief Reset the RPC context */ + /** @brief Reset the RPC context */ static void Reset() { auto* t = getInstance(); t->rank = -1; @@ -159,7 +159,7 @@ struct RPCContext { } }; -/*! +/** * @brief Send out one RPC message. * * The operation is non-blocking -- it does not guarantee the payloads have @@ -177,7 +177,7 @@ struct RPCContext { */ RPCStatus SendRPCMessage(const RPCMessage& msg); -/*! +/** * @brief Receive one RPC message. * * The operation is blocking -- it returns when it receives any message diff --git a/src/rpc/rpc_msg.h b/src/rpc/rpc_msg.h index 7783b664a547..b2d65e74fdb1 100644 --- a/src/rpc/rpc_msg.h +++ b/src/rpc/rpc_msg.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file rpc/rpc_msg.h * @brief Common headers for remote process call (RPC). @@ -16,31 +16,31 @@ namespace dgl { namespace rpc { -/*! 
@brief RPC message data structure +/** @brief RPC message data structure * * This structure is exposed to Python and can be used as argument or return * value in C API. */ struct RPCMessage : public runtime::Object { - /*! @brief Service ID */ + /** @brief Service ID */ int32_t service_id; - /*! @brief Sequence number of this message. */ + /** @brief Sequence number of this message. */ int64_t msg_seq; - /*! @brief Client ID. */ + /** @brief Client ID. */ int32_t client_id; - /*! @brief Server ID. */ + /** @brief Server ID. */ int32_t server_id; - /*! @brief Payload buffer carried by this request.*/ + /** @brief Payload buffer carried by this request.*/ std::string data; - /*! @brief Extra payloads in the form of tensors.*/ + /** @brief Extra payloads in the form of tensors.*/ std::vector tensors; - /*! @brief Group ID. */ + /** @brief Group ID. */ int32_t group_id{0}; bool Load(dmlc::Stream* stream) { @@ -70,7 +70,7 @@ struct RPCMessage : public runtime::Object { DGL_DEFINE_OBJECT_REF(RPCMessageRef, RPCMessage); -/*! @brief RPC status flag */ +/** @brief RPC status flag */ enum RPCStatus { kRPCSuccess = 0, kRPCTimeOut, diff --git a/src/rpc/server_state.h b/src/rpc/server_state.h index a136ac54feb4..c0952b917a06 100644 --- a/src/rpc/server_state.h +++ b/src/rpc/server_state.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file rpc/server_state.h * @brief Implementation of RPC utilities used by both server and client sides. @@ -17,7 +17,7 @@ namespace dgl { namespace rpc { -/*! +/** * @brief Data stored in one DGL server. * * In a distributed setting, DGL partitions all data associated with the graph @@ -39,16 +39,16 @@ namespace rpc { * shared memory. */ struct ServerState : public runtime::Object { - /*! @brief Key value store for NDArray data */ + /** @brief Key value store for NDArray data */ std::unordered_map kv_store; - /*! 
@brief Graph structure of one partition */ + /** @brief Graph structure of one partition */ HeteroGraphPtr graph; - /*! @brief Total number of nodes */ + /** @brief Total number of nodes */ int64_t total_num_nodes = 0; - /*! @brief Total number of edges */ + /** @brief Total number of edges */ int64_t total_num_edges = 0; static constexpr const char* _type_key = "server_state.ServerState"; diff --git a/src/rpc/tensorpipe/queue.h b/src/rpc/tensorpipe/queue.h index 14bb59f591f8..8774ebec9753 100644 --- a/src/rpc/tensorpipe/queue.h +++ b/src/rpc/tensorpipe/queue.h @@ -1,4 +1,4 @@ -/* +/** * Copyright (c) Facebook, Inc. and its affiliates. * All rights reserved. * diff --git a/src/rpc/tensorpipe/tp_communicator.cc b/src/rpc/tensorpipe/tp_communicator.cc index b3f13e1b8bfa..a2827ff5422b 100644 --- a/src/rpc/tensorpipe/tp_communicator.cc +++ b/src/rpc/tensorpipe/tp_communicator.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file tp_communicator.cc * @brief Tensorpipe Communicator for DGL distributed training. diff --git a/src/rpc/tensorpipe/tp_communicator.h b/src/rpc/tensorpipe/tp_communicator.h index f8b0719ffb13..0ac3fe41f1d1 100644 --- a/src/rpc/tensorpipe/tp_communicator.h +++ b/src/rpc/tensorpipe/tp_communicator.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file tp_communicator.h * @brief Tensorpipe Communicator for DGL distributed training. @@ -25,14 +25,14 @@ namespace rpc { typedef Queue RPCMessageQueue; -/*! +/** * @brief TPSender for DGL distributed training. * * TPSender is the communicator implemented by tcp socket. */ class TPSender : public RPCSender { public: - /*! + /** * @brief Sender constructor * @param queue_size size of message queue */ @@ -41,12 +41,12 @@ class TPSender : public RPCSender { this->context = ctx; } - /*! + /** * @brief Sender destructor */ ~TPSender() { Finalize(); } - /*! + /** * @brief Connect to a receiver. 
* * When there are multiple receivers to be connected, application will call @@ -62,19 +62,19 @@ class TPSender : public RPCSender { */ bool ConnectReceiver(const std::string& addr, int recv_id) override; - /*! + /** * @brief Send RPCMessage to specified Receiver. * @param msg data message * @param recv_id receiver's ID */ void Send(const RPCMessage& msg, int recv_id) override; - /*! + /** * @brief Finalize TPSender */ void Finalize() override; - /*! + /** * @brief Communicator type: 'tp' */ const std::string& NetType() const override { @@ -83,31 +83,31 @@ class TPSender : public RPCSender { } private: - /*! + /** * @brief global context of tensorpipe */ std::shared_ptr context; - /*! + /** * @brief pipe for each connection of receiver */ std::unordered_map> pipes_; - /*! + /** * @brief receivers' listening address */ std::unordered_map receiver_addrs_; }; -/*! +/** * @brief TPReceiver for DGL distributed training. * * Tensorpipe Receiver is the communicator implemented by tcp socket. */ class TPReceiver : public RPCReceiver { public: - /*! + /** * @brief Receiver constructor * @param queue_size size of message queue. */ @@ -117,12 +117,12 @@ class TPReceiver : public RPCReceiver { queue_ = std::make_shared(); } - /*! + /** * @brief Receiver destructor */ ~TPReceiver() { Finalize(); } - /*! + /** * @brief Wait for all the Senders to connect * @param addr Networking address, e.g., 'tcp://127.0.0.1:50051' * @param num_sender total number of Senders @@ -134,7 +134,7 @@ class TPReceiver : public RPCReceiver { bool Wait( const std::string& addr, int num_sender, bool blocking = true) override; - /*! + /** * @brief Recv RPCMessage from Sender. Actually removing data from queue. * @param msg pointer of RPCmessage * @param timeout The timeout value in milliseconds. If zero, wait @@ -143,14 +143,14 @@ class TPReceiver : public RPCReceiver { */ RPCStatus Recv(RPCMessage* msg, int timeout) override; - /*! 
+ /** * @brief Finalize SocketReceiver * * Finalize() is not thread-safe and only one thread can invoke this API. */ void Finalize() override; - /*! + /** * @brief Communicator type: 'tp' (tensorpipe) */ const std::string& NetType() const override { @@ -158,7 +158,7 @@ class TPReceiver : public RPCReceiver { return net_type; } - /*! + /** * @brief Issue a receive request on pipe, and push the result into queue */ static void ReceiveFromPipe( @@ -166,45 +166,45 @@ class TPReceiver : public RPCReceiver { std::shared_ptr queue); private: - /*! + /** * @brief Callback for new connection is accepted. */ void OnAccepted(const tensorpipe::Error&, std::shared_ptr); private: - /*! + /** * @brief number of sender */ int num_sender_; - /*! + /** * @brief listener to build pipe */ std::shared_ptr listener; - /*! + /** * @brief global context of tensorpipe */ std::shared_ptr context; - /*! + /** * @brief pipe for each client connections */ std::unordered_map< int /* Sender (virutal) ID */, std::shared_ptr> pipes_; - /*! + /** * @brief RPCMessage queue */ std::shared_ptr queue_; - /*! + /** * @brief number of accepted connections */ std::atomic num_connected_{0}; - /*! + /** * @brief listner */ std::shared_ptr listener_{nullptr}; diff --git a/src/runtime/c_object_api.cc b/src/runtime/c_object_api.cc index 72137c19bba5..322389c7fe35 100644 --- a/src/runtime/c_object_api.cc +++ b/src/runtime/c_object_api.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2016 by Contributors * Implementation of C API (reference: tvm/src/api/c_api.cc) * @file c_api.cc @@ -17,19 +17,19 @@ #include "runtime_base.h" -/*! @brief entry to to easily hold returning information */ +/** @brief entry to to easily hold returning information */ struct DGLAPIThreadLocalEntry { - /*! @brief result holder for returning strings */ + /** @brief result holder for returning strings */ std::vector ret_vec_str; - /*! 
@brief result holder for returning string pointers */ + /** @brief result holder for returning string pointers */ std::vector ret_vec_charp; - /*! @brief result holder for retruning string */ + /** @brief result holder for retruning string */ std::string ret_str; }; using namespace dgl::runtime; -/*! @brief Thread local store that can be used to hold return values. */ +/** @brief Thread local store that can be used to hold return values. */ typedef dmlc::ThreadLocalStore DGLAPIThreadLocalStore; using DGLAPIObject = std::shared_ptr; diff --git a/src/runtime/c_runtime_api.cc b/src/runtime/c_runtime_api.cc index 344e028b029a..de6710a1792c 100644 --- a/src/runtime/c_runtime_api.cc +++ b/src/runtime/c_runtime_api.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2016-2022 by Contributors * @file c_runtime_api.cc * @brief Runtime API implementation @@ -22,7 +22,7 @@ namespace dgl { namespace runtime { -/*! +/** * @brief The name of Device API factory. * @param type The device type. */ diff --git a/src/runtime/config.cc b/src/runtime/config.cc index f1403555634d..b4074c400f1c 100644 --- a/src/runtime/config.cc +++ b/src/runtime/config.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file runtime/config.cc * @brief DGL runtime config diff --git a/src/runtime/cpu_device_api.cc b/src/runtime/cpu_device_api.cc index 9cdecd9f9832..e91c6665e382 100644 --- a/src/runtime/cpu_device_api.cc +++ b/src/runtime/cpu_device_api.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2016-2022 by Contributors * @file cpu_device_api.cc */ diff --git a/src/runtime/cuda/cuda_common.h b/src/runtime/cuda/cuda_common.h index d3025f4e0433..f2c3f83f8473 100644 --- a/src/runtime/cuda/cuda_common.h +++ b/src/runtime/cuda/cuda_common.h @@ -1,4 +1,4 @@ -/*! 
+/** * Copyright (c) 2017 by Contributors * @file cuda_common.h * @brief Common utilities for CUDA @@ -109,7 +109,7 @@ inline const char* curandGetErrorString(curandStatus_t error) { return "Unrecognized curand error string"; } -/* +/** * @brief Cast data type to cudaDataType_t. */ template @@ -133,7 +133,7 @@ struct cuda_dtype { }; #if CUDART_VERSION >= 11000 -/* +/** * @brief Cast index data type to cusparseIndexType_t. */ template @@ -152,24 +152,24 @@ struct cusparse_idtype { }; #endif -/*! @brief Thread local workspace */ +/** @brief Thread local workspace */ class CUDAThreadEntry { public: - /*! @brief The cusparse handler */ + /** @brief The cusparse handler */ cusparseHandle_t cusparse_handle{nullptr}; - /*! @brief The cublas handler */ + /** @brief The cublas handler */ cublasHandle_t cublas_handle{nullptr}; - /*! @brief The curand generator */ + /** @brief The curand generator */ curandGenerator_t curand_gen{nullptr}; - /*! @brief thread local pool*/ + /** @brief thread local pool*/ WorkspacePool pool; - /*! @brief constructor */ + /** @brief constructor */ CUDAThreadEntry(); // get the threadlocal workspace static CUDAThreadEntry* ThreadLocal(); }; -/*! @brief Get the current CUDA stream */ +/** @brief Get the current CUDA stream */ cudaStream_t getCurrentCUDAStream(); } // namespace runtime } // namespace dgl diff --git a/src/runtime/cuda/cuda_device_api.cc b/src/runtime/cuda/cuda_device_api.cc index c208988a2801..9085403ff500 100644 --- a/src/runtime/cuda/cuda_device_api.cc +++ b/src/runtime/cuda/cuda_device_api.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2017-2022 by Contributors * @file cuda_device_api.cc * @brief GPU specific API @@ -194,7 +194,7 @@ class CUDADeviceAPI final : public DeviceAPI { CUDA_CALL(cudaStreamSynchronize(static_cast(stream))); } - /*! 
NOTE: If the backend is PyTorch, we will use PyTorch's stream management, + /** NOTE: If the backend is PyTorch, we will use PyTorch's stream management, * so just avoid calling our SetStream/CreateStream unless * you really need advanced stream control. * TODO(Xin): Redirect this to PyTorch or remove it. @@ -206,7 +206,7 @@ class CUDADeviceAPI final : public DeviceAPI { return static_cast(getCurrentCUDAStream()); } - /*! NOTE: cudaHostRegister can be called from an arbitrary GPU device, + /** NOTE: cudaHostRegister can be called from an arbitrary GPU device, * so we don't need to specify a ctx. * The pinned memory can be seen by all CUDA contexts, * not just the one that performed the allocation diff --git a/src/runtime/cuda/cuda_hashtable.cu b/src/runtime/cuda/cuda_hashtable.cu index 3202665a23b9..be2fe301e2a2 100644 --- a/src/runtime/cuda/cuda_hashtable.cu +++ b/src/runtime/cuda/cuda_hashtable.cu @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2021 by Contributors * @file runtime/cuda/cuda_device_common.cuh * @brief Device level functions for within cuda kernels. diff --git a/src/runtime/cuda/cuda_hashtable.cuh b/src/runtime/cuda/cuda_hashtable.cuh index a4529e1cef46..8a3c3a2c990f 100644 --- a/src/runtime/cuda/cuda_hashtable.cuh +++ b/src/runtime/cuda/cuda_hashtable.cuh @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2021 by Contributors * @file runtime/cuda/cuda_device_common.cuh * @brief Device level functions for within cuda kernels. @@ -19,7 +19,7 @@ namespace cuda { template class OrderedHashTable; -/*! +/** * @brief A device-side handle for a GPU hashtable for mapping items to the * first index at which they appear in the provided data array. * @@ -179,7 +179,7 @@ class DeviceOrderedHashTable { friend class OrderedHashTable; }; -/*! +/** * @brief A host-side handle for a GPU hashtable for mapping items to the * first index at which they appear in the provided data array. 
This host-side * handle is responsible for allocating and free the GPU memory of the diff --git a/src/runtime/cuda/nccl_api.cu b/src/runtime/cuda/nccl_api.cu index 69e4560d6b8e..a0e8d45fb914 100644 --- a/src/runtime/cuda/nccl_api.cu +++ b/src/runtime/cuda/nccl_api.cu @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2021-2022 by Contributors * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/runtime/cuda/nccl_api.h b/src/runtime/cuda/nccl_api.h index 9bef5579518b..cfef9057c692 100644 --- a/src/runtime/cuda/nccl_api.h +++ b/src/runtime/cuda/nccl_api.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2021-2022 by Contributors * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/src/runtime/dlpack_convert.cc b/src/runtime/dlpack_convert.cc index e0f021a07043..e1ee57c87a52 100644 --- a/src/runtime/dlpack_convert.cc +++ b/src/runtime/dlpack_convert.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2022 by Contributors * @file src/runtime/dlpack_convert.cc * @brief Conversion between NDArray and DLPack. diff --git a/src/runtime/dso_module.cc b/src/runtime/dso_module.cc index d12c68603445..67c05ef4843c 100644 --- a/src/runtime/dso_module.cc +++ b/src/runtime/dso_module.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2017 by Contributors * @file dso_dll_module.cc * @brief Module to load from dynamic shared library. diff --git a/src/runtime/file_util.cc b/src/runtime/file_util.cc index d197e25c3551..00ce2c94dfc0 100644 --- a/src/runtime/file_util.cc +++ b/src/runtime/file_util.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2017 by Contributors * @file file_util.cc */ diff --git a/src/runtime/file_util.h b/src/runtime/file_util.h index e667a0bcea38..fea2861e8c56 100644 --- a/src/runtime/file_util.h +++ b/src/runtime/file_util.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2017 by Contributors * @file file_util.h * @brief Minimum file manipulation util for runtime. @@ -13,7 +13,7 @@ namespace dgl { namespace runtime { -/*! 
+/** * @brief Get file format from given file name or format argument. * @param file_name The name of the file. * @param format The format of the file. @@ -21,40 +21,40 @@ namespace runtime { std::string GetFileFormat( const std::string& file_name, const std::string& format); -/*! +/** * @return the directory in which DGL stores cached files. * May be set using DGL_CACHE_DIR; defaults to system locations. */ std::string GetCacheDir(); -/*! +/** * @brief Get meta file path given file name and format. * @param file_name The name of the file. */ std::string GetMetaFilePath(const std::string& file_name); -/*! +/** * @brief Get file basename (i.e. without leading directories) * @param file_name The name of the file. * @return the base name */ std::string GetFileBasename(const std::string& file_name); -/*! +/** * @brief Load binary file into a in-memory buffer. * @param file_name The name of the file. * @param data The data to be loaded. */ void LoadBinaryFromFile(const std::string& file_name, std::string* data); -/*! +/** * @brief Load binary file into a in-memory buffer. * @param file_name The name of the file. * @param data The binary data to be saved. */ void SaveBinaryToFile(const std::string& file_name, const std::string& data); -/*! +/** * @brief Save meta data to file. * @param file_name The name of the file. * @param fmap The function info map. @@ -63,7 +63,7 @@ void SaveMetaDataToFile( const std::string& file_name, const std::unordered_map& fmap); -/*! +/** * @brief Load meta data to file. * @param file_name The name of the file. * @param fmap The function info map. diff --git a/src/runtime/meta_data.h b/src/runtime/meta_data.h index 515747c89d3b..d1f735015be5 100644 --- a/src/runtime/meta_data.h +++ b/src/runtime/meta_data.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2017 by Contributors * @file meta_data.h * @brief Meta data related utilities @@ -18,7 +18,7 @@ namespace dgl { namespace runtime { -/*! 
@brief function information needed by device */ +/** @brief function information needed by device */ struct FunctionInfo { std::string name; std::vector arg_types; diff --git a/src/runtime/module.cc b/src/runtime/module.cc index b867c19d03f0..50d50523a139 100644 --- a/src/runtime/module.cc +++ b/src/runtime/module.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2017 by Contributors * @file module.cc * @brief DGL module system diff --git a/src/runtime/module_util.cc b/src/runtime/module_util.cc index 6197c9807d32..89a46c29d86a 100644 --- a/src/runtime/module_util.cc +++ b/src/runtime/module_util.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2017 by Contributors * @file module_util.cc * @brief Utilities for module. diff --git a/src/runtime/module_util.h b/src/runtime/module_util.h index 17ad81bf7d4f..2bdf8b68e740 100644 --- a/src/runtime/module_util.h +++ b/src/runtime/module_util.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2017 by Contributors * @file module_util.h * @brief Helper utilities for module building @@ -20,21 +20,21 @@ typedef int (*BackendPackedCFunc)(void* args, int* type_codes, int num_args); namespace dgl { namespace runtime { -/*! +/** * @brief Wrap a BackendPackedCFunc to packed function. * @param faddr The function address * @param mptr The module pointer node. */ PackedFunc WrapPackedFunc( BackendPackedCFunc faddr, const std::shared_ptr& mptr); -/*! +/** * @brief Load and append module blob to module list * @param mblob The module blob. * @param module_list The module list to append to */ void ImportModuleBlob(const char* mblob, std::vector* module_list); -/*! +/** * @brief Utility to initialize conext function symbols during startup * @param flookup A symbol lookup function. * @tparam FLookup a function of signature string->void* diff --git a/src/runtime/ndarray.cc b/src/runtime/ndarray.cc index d067dce7f794..6c2679c390ab 100644 --- a/src/runtime/ndarray.cc +++ b/src/runtime/ndarray.cc @@ -1,4 +1,4 @@ -/*! 
+/** * Copyright (c) 2017-2022 by Contributors * @file ndarray.cc * @brief NDArray container infratructure. diff --git a/src/runtime/object.cc b/src/runtime/object.cc index f835f7b98c3b..e30ceb9403c2 100644 --- a/src/runtime/object.cc +++ b/src/runtime/object.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file runtime/object.cc * @brief Implementation of runtime object APIs. diff --git a/src/runtime/pack_args.h b/src/runtime/pack_args.h index 6c069e386080..3e7e873b5dc3 100644 --- a/src/runtime/pack_args.h +++ b/src/runtime/pack_args.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2017 by Contributors * @file pack_args.h * @brief Utility to pack DGLArgs to other type-erased fution calling @@ -22,7 +22,7 @@ namespace dgl { namespace runtime { -/*! +/** * @brief argument union type of 32bit. * Choose 32 bit because most GPU API do not work well with 64 bit. */ @@ -31,7 +31,7 @@ union ArgUnion { uint32_t v_uint32; float v_float32; }; -/*! +/** * @brief Create a packed function from void addr types. * * @param f with signiture (DGLArgs args, DGLRetValue* rv, void* void_args) @@ -43,7 +43,7 @@ union ArgUnion { template inline PackedFunc PackFuncVoidAddr( F f, const std::vector& arg_types); -/*! +/** * @brief Create a packed function that from function only packs buffer * arguments. * @@ -56,7 +56,7 @@ inline PackedFunc PackFuncVoidAddr( template inline PackedFunc PackFuncNonBufferArg( F f, const std::vector& arg_types); -/*! +/** * @brief Create a packed function that from function that takes a packed * arguments. * @@ -70,7 +70,7 @@ inline PackedFunc PackFuncNonBufferArg( template inline PackedFunc PackFuncPackedArg( F f, const std::vector& arg_types); -/*! +/** * @brief Extract number of buffer argument from the argument types. * @param arg_types The argument types. * @return number of buffer arguments @@ -98,7 +98,7 @@ class TempArray { std::vector data_; }; -/*! @brief conversion code used in void arg. 
*/ +/** @brief conversion code used in void arg. */ enum ArgConvertCode { INT64_TO_INT64, INT64_TO_INT32, diff --git a/src/runtime/parallel_for.cpp b/src/runtime/parallel_for.cpp index ae0ca66dd048..d1d26b4587c1 100644 --- a/src/runtime/parallel_for.cpp +++ b/src/runtime/parallel_for.cpp @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2016 by Contributors * Implementation of C API (reference: tvm/src/api/c_api.cc) * @file c_api.cc diff --git a/src/runtime/registry.cc b/src/runtime/registry.cc index 567493bfd479..bed987f745a9 100644 --- a/src/runtime/registry.cc +++ b/src/runtime/registry.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2017 by Contributors * @file registry.cc * @brief The global registry of packed function. @@ -111,15 +111,15 @@ ExtTypeVTable* ExtTypeVTable::RegisterInternal( } // namespace runtime } // namespace dgl -/*! @brief entry to to easily hold returning information */ +/** @brief entry to to easily hold returning information */ struct DGLFuncThreadLocalEntry { - /*! @brief result holder for returning strings */ + /** @brief result holder for returning strings */ std::vector ret_vec_str; - /*! @brief result holder for returning string pointers */ + /** @brief result holder for returning string pointers */ std::vector ret_vec_charp; }; -/*! @brief Thread local store that can be used to hold return values. */ +/** @brief Thread local store that can be used to hold return values. */ typedef dmlc::ThreadLocalStore DGLFuncThreadLocalStore; int DGLExtTypeFree(void* handle, int type_code) { diff --git a/src/runtime/resource_manager.cc b/src/runtime/resource_manager.cc index 40266926bb9a..db8beebf0e4b 100644 --- a/src/runtime/resource_manager.cc +++ b/src/runtime/resource_manager.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file resource_manager.cc * @brief Manage the resources. @@ -13,7 +13,7 @@ namespace dgl { namespace runtime { -/* +/** * The runtime allocates resources during the computation. 
Some of the resources * cannot be destroyed after the process exits especially when the process * doesn't exits normally. We need to keep track of the resources in the system diff --git a/src/runtime/resource_manager.h b/src/runtime/resource_manager.h index 6f050c301b7c..251dd25affb1 100644 --- a/src/runtime/resource_manager.h +++ b/src/runtime/resource_manager.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file resource_manager.h * @brief Manage the resources in the runtime system. @@ -13,7 +13,7 @@ namespace dgl { namespace runtime { -/* +/** * A class that provides the interface to describe a resource that can be * managed by a resource manager. Some of the resources cannot be free'd * automatically when the process exits, especially when the process doesn't diff --git a/src/runtime/runtime_base.h b/src/runtime/runtime_base.h index 6184953e8309..bf3324d703cd 100644 --- a/src/runtime/runtime_base.h +++ b/src/runtime/runtime_base.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2016 by Contributors * @file runtime_base.h * @brief Base of all C APIs @@ -10,9 +10,9 @@ #include -/*! @brief macro to guard beginning and end section of all functions */ +/** @brief macro to guard beginning and end section of all functions */ #define API_BEGIN() try { -/*! @brief every function starts with API_BEGIN(); +/** @brief every function starts with API_BEGIN(); and finishes with API_END() or API_END_HANDLE_ERROR */ #define API_END() \ } \ @@ -20,7 +20,7 @@ return DGLAPIHandleException(_except_); \ } \ return 0; // NOLINT(*) -/*! +/** * @brief every function starts with API_BEGIN(); * and finishes with API_END() or API_END_HANDLE_ERROR * The finally clause contains procedure to cleanup states when an error @@ -34,7 +34,7 @@ } \ return 0; // NOLINT(*) -/*! 
+/** * @brief handle exception throwed out * @param e the exception * @return the return value of API after exception is handled diff --git a/src/runtime/semaphore_wrapper.cc b/src/runtime/semaphore_wrapper.cc index 40bdf647cf75..e0ab57b7b485 100644 --- a/src/runtime/semaphore_wrapper.cc +++ b/src/runtime/semaphore_wrapper.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2021 by Contributors * @file semaphore_wrapper.cc * @brief A simple corss platform semaphore wrapper diff --git a/src/runtime/semaphore_wrapper.h b/src/runtime/semaphore_wrapper.h index 80512ba87de0..6d93dca988b0 100644 --- a/src/runtime/semaphore_wrapper.h +++ b/src/runtime/semaphore_wrapper.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2021 by Contributors * @file semaphore_wrapper.h * @brief A simple corss platform semaphore wrapper @@ -15,26 +15,26 @@ namespace dgl { namespace runtime { -/*! +/** * @brief A simple crossplatform Semaphore wrapper */ class Semaphore { public: - /*! + /** * @brief Semaphore constructor */ Semaphore(); - /*! + /** * @brief blocking wait, decrease semaphore by 1 */ void Wait(); - /*! + /** * @brief timed wait, decrease semaphore by 1 or returns if times out * @param timeout The timeout value in milliseconds. If zero, wait * indefinitely. */ bool TimedWait(int timeout); - /*! + /** * @brief increase semaphore by 1 */ void Post(); diff --git a/src/runtime/shared_mem.cc b/src/runtime/shared_mem.cc index 9fe5575e1488..a78f4f70a33e 100644 --- a/src/runtime/shared_mem.cc +++ b/src/runtime/shared_mem.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file shared_mem.cc * @brief Shared memory management. @@ -18,7 +18,7 @@ namespace dgl { namespace runtime { -/* +/** * Shared memory is a resource that cannot be cleaned up if the process doesn't * exit normally. We'll manage the resource with ResourceManager. 
*/ diff --git a/src/runtime/system_lib_module.cc b/src/runtime/system_lib_module.cc index 0621838c7266..f4adfca98961 100644 --- a/src/runtime/system_lib_module.cc +++ b/src/runtime/system_lib_module.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2017 by Contributors * @file system_lib_module.cc * @brief SystemLib module. diff --git a/src/runtime/tensordispatch.cc b/src/runtime/tensordispatch.cc index e4400533cfa3..2b37d9d8087d 100644 --- a/src/runtime/tensordispatch.cc +++ b/src/runtime/tensordispatch.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file runtime/tensordispatch.cc * @brief Adapter library caller diff --git a/src/runtime/thread_pool.cc b/src/runtime/thread_pool.cc index b1fcd366de37..4915796f53a1 100644 --- a/src/runtime/thread_pool.cc +++ b/src/runtime/thread_pool.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2017 by Contributors * @file thread_pool.cc * @brief Threadpool for multi-threading runtime. @@ -30,7 +30,7 @@ namespace runtime { // stride in the page, fit to cache line. constexpr int kSyncStride = 64 / sizeof(std::atomic); -/*! +/** * @brief Thread local master environment. */ class ParallelLauncher { @@ -112,10 +112,10 @@ class ParallelLauncher { std::vector par_errors_; }; -/*! @brief Lock-free single-producer-single-consumer queue for each thread */ +/** @brief Lock-free single-producer-single-consumer queue for each thread */ class SpscTaskQueue { public: - /*! @brief The task entry */ + /** @brief The task entry */ struct Task { ParallelLauncher* launcher; int32_t task_id; @@ -125,7 +125,7 @@ class SpscTaskQueue { ~SpscTaskQueue() { delete[] buffer_; } - /*! + /** * @brief Push a task into the queue and notify the comsumer if it is on wait. * @param input The task to be dequeued. */ @@ -139,7 +139,7 @@ class SpscTaskQueue { } } - /*! + /** * @brief Pop a task out of the queue and condition wait if no tasks. * @param output The pointer to the task to be dequeued. 
* @param spin_count The number of iterations to spin before sleep. @@ -169,7 +169,7 @@ class SpscTaskQueue { return true; } - /*! + /** * @brief Signal to terminate the worker. */ void SignalForKill() { @@ -179,7 +179,7 @@ class SpscTaskQueue { } protected: - /*! + /** * @brief Lock-free enqueue. * @param input The task to be enqueued. * @return Whether the task is enqueued. diff --git a/src/runtime/thread_storage_scope.h b/src/runtime/thread_storage_scope.h index 0dc7e8206024..aeb0cd7563fe 100644 --- a/src/runtime/thread_storage_scope.h +++ b/src/runtime/thread_storage_scope.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2017 by Contributors * @file thread_storage_scope.h * @brief Extract thread axis configuration from DGLArgs. @@ -14,28 +14,28 @@ namespace dgl { namespace runtime { -/*! +/** * @brief Memory hierachy rank in the storage system * @note The global rank and shared rank have one to one * correspondence to the thread rank. */ enum class StorageRank { - /*! @brief global memory */ + /** @brief global memory */ kGlobal = 0, - /*! @brief shared memory among thread group */ + /** @brief shared memory among thread group */ kShared = 1, - /*! + /** * @brief reserved for warp memory. * This is only used by programming model. * There is no such memory usually in GPU. * Instead, we can simulate it by registers and shuffle. */ kWarp = 2, - /*! @brief thread local memory */ + /** @brief thread local memory */ kLocal = 3 }; -/*! +/** * @param thread_scope_rank The thread scope rank * @return default storage rank given the thread scope */ @@ -54,11 +54,11 @@ inline StorageRank DefaultStorageRank(int thread_scope_rank) { } } -/*! @brief class to represent storage scope */ +/** @brief class to represent storage scope */ struct StorageScope { - /*! @brief The rank of the storage */ + /** @brief The rank of the storage */ StorageRank rank{StorageRank::kGlobal}; - /*! @brief tag for special purpose memory. */ + /** @brief tag for special purpose memory. 
*/ std::string tag; // comparator inline bool operator==(const StorageScope& other) const { @@ -83,7 +83,7 @@ struct StorageScope { return ""; } } - /*! + /** * @brief make storage scope from string * @param s The string to be parsed. * @return The storage scope. @@ -109,13 +109,13 @@ struct StorageScope { } }; -/*! @brief class to represent thread scope */ +/** @brief class to represent thread scope */ struct ThreadScope { - /*! @brief The rank of thread scope */ + /** @brief The rank of thread scope */ int rank{0}; - /*! @brief the dimension index under the rank */ + /** @brief the dimension index under the rank */ int dim_index{0}; - /*! + /** * @brief make storage scope from string * @param s The string to be parsed. * @return The storage scope. @@ -139,22 +139,22 @@ struct ThreadScope { } }; -/*! @brief workload speccification */ +/** @brief workload speccification */ struct ThreadWorkLoad { // array, first three are thread configuration. size_t work_size[6]; - /*! + /** * @param i The block dimension. * @return i-th block dim */ inline size_t block_dim(size_t i) const { return work_size[i + 3]; } - /*! + /** * @param i The grid dimension. * @return i-th grid dim */ inline size_t grid_dim(size_t i) const { return work_size[i]; } }; -/*! @brief Thread axis configuration */ +/** @brief Thread axis configuration */ class ThreadAxisConfig { public: void Init(size_t base, const std::vector& thread_axis_tags) { @@ -187,11 +187,11 @@ class ThreadAxisConfig { size_t work_dim() const { return work_dim_; } private: - /*! @brief base axis */ + /** @brief base axis */ size_t base_; - /*! @brief The worker dimension */ + /** @brief The worker dimension */ size_t work_dim_; - /*! @brief The index mapping. */ + /** @brief The index mapping. 
*/ std::vector arg_index_map_; }; diff --git a/src/runtime/threading_backend.cc b/src/runtime/threading_backend.cc index fcf0a012c3b5..5471c6730caf 100644 --- a/src/runtime/threading_backend.cc +++ b/src/runtime/threading_backend.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2018 by Contributors * @file threading_backend.cc * @brief Native threading backend diff --git a/src/runtime/utils.cc b/src/runtime/utils.cc index dba9d47c213e..c10681719928 100644 --- a/src/runtime/utils.cc +++ b/src/runtime/utils.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file utils.cc * @brief DGL util functions diff --git a/src/runtime/workspace.h b/src/runtime/workspace.h index 70907b6382e6..f88e14c7fe7e 100644 --- a/src/runtime/workspace.h +++ b/src/runtime/workspace.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2021 by Contributors * @file ndarray_partition.h * @brief Operations on partition implemented in CUDA. diff --git a/src/runtime/workspace_pool.cc b/src/runtime/workspace_pool.cc index a51405adf70e..5c6ef6b3100b 100644 --- a/src/runtime/workspace_pool.cc +++ b/src/runtime/workspace_pool.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2017 by Contributors * @file workspace_pool.h * @brief Workspace pool utility. @@ -108,14 +108,14 @@ class WorkspacePool::Pool { } private: - /*! @brief a single entry in the pool */ + /** @brief a single entry in the pool */ struct Entry { void* data; size_t size; }; - /*! @brief List of free items, sorted from small to big size */ + /** @brief List of free items, sorted from small to big size */ std::vector free_list_; - /*! @brief List of allocated items */ + /** @brief List of allocated items */ std::vector allocated_; }; diff --git a/src/runtime/workspace_pool.h b/src/runtime/workspace_pool.h index 94ee82275ea0..b2bb40513e83 100644 --- a/src/runtime/workspace_pool.h +++ b/src/runtime/workspace_pool.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2017 by Contributors * @file workspace_pool.h * @brief Workspace pool utility. 
@@ -13,7 +13,7 @@ namespace dgl { namespace runtime { -/*! +/** * @brief A workspace pool to manage * * \note We have the following assumption about backend temporal @@ -26,21 +26,21 @@ namespace runtime { */ class WorkspacePool { public: - /*! + /** * @brief Create pool with specific device type and device. * @param device_type The device type. * @param device The device API. */ WorkspacePool(DGLDeviceType device_type, std::shared_ptr device); - /*! @brief destructor */ + /** @brief destructor */ ~WorkspacePool(); - /*! + /** * @brief Allocate temporal workspace. * @param ctx The context of allocation. * @param size The size to be allocated. */ void* AllocWorkspace(DGLContext ctx, size_t size); - /*! + /** * @brief Free temporal workspace in backend execution. * * @param ctx The context of allocation. @@ -50,11 +50,11 @@ class WorkspacePool { private: class Pool; - /*! @brief pool of device local array */ + /** @brief pool of device local array */ std::vector array_; - /*! @brief device type this pool support */ + /** @brief device type this pool support */ DGLDeviceType device_type_; - /*! @brief The device API */ + /** @brief The device API */ std::shared_ptr device_; }; diff --git a/src/scheduler/scheduler.cc b/src/scheduler/scheduler.cc index 46355b4dde0b..ef45a5c91559 100644 --- a/src/scheduler/scheduler.cc +++ b/src/scheduler/scheduler.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2018 by Contributors * @file scheduler/scheduler.cc * @brief DGL Scheduler implementation diff --git a/src/scheduler/scheduler_apis.cc b/src/scheduler/scheduler_apis.cc index 357bee9ac8f8..fe61f8743128 100644 --- a/src/scheduler/scheduler_apis.cc +++ b/src/scheduler/scheduler_apis.cc @@ -1,4 +1,4 @@ -/*! 
+/** * Copyright (c) 2018 by Contributors * @file scheduler/scheduler_apis.cc * @brief DGL scheduler APIs diff --git a/tensoradapter/include/tensoradapter.h b/tensoradapter/include/tensoradapter.h index 7e7dbecc7c34..fe4cf3b1aab6 100644 --- a/tensoradapter/include/tensoradapter.h +++ b/tensoradapter/include/tensoradapter.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020-2022 by Contributors * @file tensoradapter.h * @brief Header file for functions exposed by the adapter library. @@ -18,7 +18,7 @@ namespace tensoradapter { extern "C" { -/*! +/** * @brief Allocate a piece of CPU memory via * PyTorch's CPUAllocator * @@ -27,7 +27,7 @@ extern "C" { */ void* CPURawAlloc(size_t nbytes); -/*! +/** * @brief Free the CPU memory. * * @param ptr Pointer to the memory to be freed. @@ -35,7 +35,7 @@ void* CPURawAlloc(size_t nbytes); void CPURawDelete(void* ptr); #ifdef DGL_USE_CUDA -/*! +/** * @brief Allocate a piece of GPU memory via * PyTorch's THCCachingAllocator. * @@ -45,19 +45,19 @@ void CPURawDelete(void* ptr); */ void* CUDARawAlloc(size_t nbytes, cudaStream_t stream); -/*! +/** * @brief Free the GPU memory. * * @param ptr Pointer to the memory to be freed. */ void CUDARawDelete(void* ptr); -/*! +/** * @brief Get the current CUDA stream. */ cudaStream_t CUDACurrentStream(); -/*! +/** * @brief Let the caching allocator know which streams are using this tensor. * * @param ptr Pointer of the tensor to be recorded. diff --git a/tensoradapter/include/tensoradapter_exports.h b/tensoradapter/include/tensoradapter_exports.h index 3a41a247ec60..553e24ccfe20 100644 --- a/tensoradapter/include/tensoradapter_exports.h +++ b/tensoradapter/include/tensoradapter_exports.h @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020 by Contributors * @file tensoradapter_exports.h * @brief Header file for functions exposed by the adapter library. 
diff --git a/tensoradapter/pytorch/torch.cpp b/tensoradapter/pytorch/torch.cpp index b5bc7510ee00..bc18113e318c 100644 --- a/tensoradapter/pytorch/torch.cpp +++ b/tensoradapter/pytorch/torch.cpp @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2020-2022 by Contributors * @file torch/torch.cpp * @brief Implementation of PyTorch adapter library. diff --git a/tests/cpp/graph_index_test.cc b/tests/cpp/graph_index_test.cc index 41aa5d38bab5..cf2700da96be 100644 --- a/tests/cpp/graph_index_test.cc +++ b/tests/cpp/graph_index_test.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file graph_index_test.cc * @brief Test GraphIndex diff --git a/tests/cpp/message_queue_test.cc b/tests/cpp/message_queue_test.cc index d228a872384f..fa42c93f2747 100644 --- a/tests/cpp/message_queue_test.cc +++ b/tests/cpp/message_queue_test.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file msg_queue.cc * @brief Message queue for DGL distributed training. diff --git a/tests/cpp/socket_communicator_test.cc b/tests/cpp/socket_communicator_test.cc index a47d2fc1dbf5..ae307ab725cb 100644 --- a/tests/cpp/socket_communicator_test.cc +++ b/tests/cpp/socket_communicator_test.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file socket_communicator_test.cc * @brief Test SocketCommunicator diff --git a/tests/cpp/string_test.cc b/tests/cpp/string_test.cc index 2d56771f805e..272155dc24da 100644 --- a/tests/cpp/string_test.cc +++ b/tests/cpp/string_test.cc @@ -1,4 +1,4 @@ -/*! 
+/** * Copyright (c) 2019 by Contributors * @file string_test.cc * @brief Test String Common diff --git a/tests/cpp/test_aten.cc b/tests/cpp/test_aten.cc index f990c9e74673..dc0504ca814b 100644 --- a/tests/cpp/test_aten.cc +++ b/tests/cpp/test_aten.cc @@ -264,7 +264,7 @@ void _TestConcat(DGLContext ctx) { template void _TestToSimpleCsr(DGLContext ctx) { - /* + /** * A = [[0, 0, 0, 0], * [1, 0, 0, 1], * [1, 1, 1, 1], @@ -341,7 +341,7 @@ TEST(MatrixTest, TestToSimpleCsr) { template void _TestToSimpleCoo(DGLContext ctx) { - /* + /** * A = [[0, 0, 0, 0], * [1, 0, 0, 1], * [1, 1, 1, 1], @@ -429,7 +429,7 @@ TEST(MatrixTest, TestToSimpleCoo) { template void _TestDisjointUnionPartitionCoo(DGLContext ctx) { - /* + /** * A = [[0, 0, 1], * [1, 0, 1], * [0, 1, 0]] @@ -564,7 +564,7 @@ TEST(DisjointUnionTest, TestDisjointUnionPartitionCoo) { template void _TestDisjointUnionPartitionCsr(DGLContext ctx) { - /* + /** * A = [[0, 0, 1], * [1, 0, 1], * [0, 1, 0]] @@ -690,7 +690,7 @@ TEST(DisjointUnionTest, TestDisjointUnionPartitionCsr) { template void _TestSliceContiguousChunkCoo(DGLContext ctx) { - /* + /** * A = [[1, 0, 0, 0], * [0, 0, 1, 0], * [0, 0, 0, 0]] @@ -758,7 +758,7 @@ TEST(SliceContiguousChunk, TestSliceContiguousChunkCoo) { template void _TestSliceContiguousChunkCsr(DGLContext ctx) { - /* + /** * A = [[1, 0, 0, 0], * [0, 0, 1, 0], * [0, 0, 0, 0]] @@ -825,7 +825,7 @@ TEST(SliceContiguousChunk, TestSliceContiguousChunkCsr) { template void _TestMatrixUnionCsr(DGLContext ctx) { - /* + /** * A = [[0, 0, 0, 0], * [0, 0, 0, 0], * [0, 1, 0, 0], @@ -1016,7 +1016,7 @@ TEST(MatrixUnionTest, TestMatrixUnionCsr) { template void _TestMatrixUnionCoo(DGLContext ctx) { - /* + /** * A = [[0, 0, 0, 0], * [0, 0, 0, 0], * [0, 1, 0, 0], @@ -1293,7 +1293,7 @@ TEST(ArrayTest, NonZero) { template void _TestLineGraphCOO(DGLContext ctx) { - /* + /** * A = [[0, 0, 1, 0], * [1, 0, 1, 0], * [1, 1, 0, 0], diff --git a/tests/cpp/test_unit_graph.cc b/tests/cpp/test_unit_graph.cc index 
d32efb2f9c46..20cc1ebe24cf 100644 --- a/tests/cpp/test_unit_graph.cc +++ b/tests/cpp/test_unit_graph.cc @@ -1,4 +1,4 @@ -/*! +/** * Copyright (c) 2019 by Contributors * @file test_unit_graph.cc * @brief Test UnitGraph @@ -20,7 +20,7 @@ using namespace dgl::runtime; template aten::CSRMatrix CSR1(DGLContext ctx) { - /* + /** * G = [[0, 0, 1], * [1, 0, 1], * [0, 1, 0], @@ -41,7 +41,7 @@ template aten::CSRMatrix CSR1(DGLContext ctx); template aten::COOMatrix COO1(DGLContext ctx) { - /* + /** * G = [[1, 1, 0], * [0, 1, 0]] */ @@ -60,7 +60,7 @@ template aten::COOMatrix COO1(DGLContext ctx); template void _TestUnitGraph_InOutDegrees(DGLContext ctx) { - /* + /** InDegree(s) is available only if COO or CSC formats permitted. OutDegree(s) is available only if COO or CSR formats permitted. */