From 73f33e63760ffc4af0e4f274f05623b6578762a2 Mon Sep 17 00:00:00 2001
From: nyLiao <39255546+nyLiao@users.noreply.github.com>
Date: Sun, 14 Jul 2024 20:20:40 +0800
Subject: [PATCH] Format model docs

---
 pyg_spectral/nn/conv/base_mp.py        |   6 +-
 pyg_spectral/nn/models/acm_gnn.py      |  56 +++-----
 pyg_spectral/nn/models/ada_gnn.py      |  33 ++--
 pyg_spectral/nn/models/base_nn.py      |  69 +++++----
 pyg_spectral/nn/models/cpp_comp.py     |   4 +
 pyg_spectral/nn/models/decoupled.py    | 118 ++++++--------
 pyg_spectral/nn/models/iterative.py    |  62 +++-----
 pyg_spectral/nn/models/precomputed.py  | 198 ++++++++-----------------
 pyg_spectral/nn/norm/standard_scale.py |   8 +-
 pyg_spectral/profile/efficiency.py     |   9 +-
 10 files changed, 226 insertions(+), 337 deletions(-)

diff --git a/pyg_spectral/nn/conv/base_mp.py b/pyg_spectral/nn/conv/base_mp.py
index 7f044ef..75d04ba 100644
--- a/pyg_spectral/nn/conv/base_mp.py
+++ b/pyg_spectral/nn/conv/base_mp.py
@@ -56,8 +56,7 @@ def get_propagate_mat(self,
         :meth:`forward()` with same input.
 
     Args:
-        x: from :class:`torch_geometric.data.Data`
-        edge_index: from :class:`torch_geometric.data.Data`
+        x, edge_index: from :class:`torch_geometric.data.Data`
     Attributes:
         propagate_mat (str): propagation schemes, separated by ``,``.
             Each scheme starts with ``A`` or ``L`` for adjacency or Laplacian,
@@ -153,8 +152,7 @@ def get_forward_mat(self,
     r"""Get matrices for :meth:`forward()`. Called during :meth:`forward()`.
 
     Args:
-        x: from :class:`torch_geometric.data.Data`
-        edge_index: from :class:`torch_geometric.data.Data`
+        x, edge_index: from :class:`torch_geometric.data.Data`
     Returns:
         out (Tensor): output tensor (shape: :math:`(|\mathcal{V}|, F)`)
         prop (Adj): propagation matrix
diff --git a/pyg_spectral/nn/models/acm_gnn.py b/pyg_spectral/nn/models/acm_gnn.py
index 5dc976a..4e9ff03 100644
--- a/pyg_spectral/nn/models/acm_gnn.py
+++ b/pyg_spectral/nn/models/acm_gnn.py
@@ -11,30 +11,22 @@
 
 class ACMGNN(BaseNN):
     r"""Iterative structure for ACM conv.
-    paper: Revisiting Heterophily For Graph Neural Networks
-    paper: Complete the Missing Half: Augmenting Aggregation Filtering with Diversification for Graph Convolutional Networks
-    ref: https://github.com/SitaoLuan/ACM-GNN
+
+    :paper: Revisiting Heterophily For Graph Neural Networks
+    :paper: Complete the Missing Half: Augmenting Aggregation Filtering with Diversification for Graph Convolutional Networks
+    :ref: https://github.com/SitaoLuan/ACM-GNN
 
     Args:
         theta_scheme (str): Channel list. "FBGNN"="low-high", "ACMGNN"="low-high-id",
             ("ACMGNN+"="low-high-id-struct", not implemented).
         weight_initializer (str, optional): The initializer for the weight.
-        --- BaseNN Args ---
-        conv (str): Name of :class:`pyg_spectral.nn.conv` module.
-        num_hops (int): Total number of conv hops.
-        in_channels (int): Size of each input sample.
-        hidden_channels (int): Size of each hidden sample.
-        out_channels (int): Size of each output sample.
-        in_layers (int): Number of MLP layers before conv.
-        out_layers (int): Number of MLP layers after conv.
-        dropout_lin (float, optional): Dropout probability for both MLPs.
-        dropout_conv (float, optional): Dropout probability before conv.
+        conv, num_hops, in_channels, hidden_channels, out_channels:
+            args for :class:`BaseNN`
+        in_layers, out_layers, dropout_lin, dropout_conv, lib_conv:
+            args for :class:`BaseNN`
         act, act_first, act_kwargs, norm, norm_kwargs, plain_last, bias:
-            args for :class:`pyg.nn.models.MLP`.
-        lib_conv (str, optional): Parent module library other than
-            :class:`pyg_spectral.nn.conv`.
-        **kwargs (optional): Additional arguments of the
-            :class:`pyg_spectral.nn.conv` module.
+            args for :class:`torch_geometric.nn.models.MLP`.
+        **kwargs: Additional arguments of :class:`pyg_spectral.nn.conv`.
     """
 
     def init_conv(self,
@@ -68,30 +60,22 @@ def init_conv(self,
 
 class ACMGNNDec(BaseNN):
     r"""Decoupled structure for ACM conv.
-    paper: Revisiting Heterophily For Graph Neural Networks
-    paper: Complete the Missing Half: Augmenting Aggregation Filtering with Diversification for Graph Convolutional Networks
-    ref: https://github.com/SitaoLuan/ACM-GNN
+
+    :paper: Revisiting Heterophily For Graph Neural Networks
+    :paper: Complete the Missing Half: Augmenting Aggregation Filtering with Diversification for Graph Convolutional Networks
+    :ref: https://github.com/SitaoLuan/ACM-GNN
 
     Args:
         theta_scheme (str): Channel list. "FBGNN"="low-high", "ACMGNN"="low-high-id",
            ("ACMGNN+"="low-high-id-struct", not implemented).
         weight_initializer (str, optional): The initializer for the weight.
-        --- BaseNN Args ---
-        conv (str): Name of :class:`pyg_spectral.nn.conv` module.
-        num_hops (int): Total number of conv hops.
-        in_channels (int): Size of each input sample.
-        hidden_channels (int): Size of each hidden sample.
-        out_channels (int): Size of each output sample.
-        in_layers (int): Number of MLP layers before conv.
-        out_layers (int): Number of MLP layers after conv.
-        dropout_lin (float, optional): Dropout probability for both MLPs.
-        dropout_conv (float, optional): Dropout probability before conv.
+        conv, num_hops, in_channels, hidden_channels, out_channels:
+            args for :class:`BaseNN`
+        in_layers, out_layers, dropout_lin, dropout_conv, lib_conv:
+            args for :class:`BaseNN`
         act, act_first, act_kwargs, norm, norm_kwargs, plain_last, bias:
-            args for :class:`pyg.nn.models.MLP`.
-        lib_conv (str, optional): Parent module library other than
-            :class:`pyg_spectral.nn.conv`.
-        **kwargs (optional): Additional arguments of the
-            :class:`pyg_spectral.nn.conv` module.
+            args for :class:`torch_geometric.nn.models.MLP`.
+        **kwargs: Additional arguments of :class:`pyg_spectral.nn.conv`.
     """
 
     def init_conv(self,
diff --git a/pyg_spectral/nn/models/ada_gnn.py b/pyg_spectral/nn/models/ada_gnn.py
index ea01b63..053d34d 100644
--- a/pyg_spectral/nn/models/ada_gnn.py
+++ b/pyg_spectral/nn/models/ada_gnn.py
@@ -10,26 +10,25 @@
 
 class AdaGNN(BaseNN):
     r"""Decoupled structure with diag transformation each hop of propagation.
-    paper: AdaGNN: Graph Neural Networks with Adaptive Frequency Response Filter
-    ref: https://github.com/yushundong/AdaGNN
+
+    :paper: AdaGNN: Graph Neural Networks with Adaptive Frequency Response Filter
+    :ref: https://github.com/yushundong/AdaGNN
 
     Args:
-        --- BaseNN Args ---
-        conv (str): Name of :class:`pyg_spectral.nn.conv` module.
-        num_hops (int): Total number of conv hops.
-        in_channels (int): Size of each input sample.
-        hidden_channels (int): Size of each hidden sample.
-        out_channels (int): Size of each output sample.
-        in_layers (int): Number of MLP layers before conv.
-        out_layers (int): Number of MLP layers after conv.
-        dropout_lin (float, optional): Dropout probability for both MLPs.
-        dropout_conv (float, optional): Dropout probability before conv.
+        conv, num_hops, in_channels, hidden_channels, out_channels:
+            args for :class:`BaseNN`
+        in_layers, out_layers, dropout_lin, dropout_conv, lib_conv:
+            args for :class:`BaseNN`
         act, act_first, act_kwargs, norm, norm_kwargs, plain_last, bias:
-            args for :class:`pyg.nn.models.MLP`.
-        lib_conv (str, optional): Parent module library other than
-            :class:`pyg_spectral.nn.conv`.
-        **kwargs (optional): Additional arguments of the
-            :class:`pyg_spectral.nn.conv` module.
+            args for :class:`torch_geometric.nn.models.MLP`.
+        **kwargs: Additional arguments of :class:`pyg_spectral.nn.conv`.
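+
+    Example (an illustrative sketch; the conv name and sizes below are
+    hypothetical)::
+
+        model = AdaGNN(conv='AdjConv', num_hops=10, in_channels=64,
+                       hidden_channels=64, out_channels=7)
+        out = model(data.x, data.edge_index)  # data: torch_geometric.data.Data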
     """
 
     def init_conv(self,
diff --git a/pyg_spectral/nn/models/base_nn.py b/pyg_spectral/nn/models/base_nn.py
index 5f525a9..99d989c 100644
--- a/pyg_spectral/nn/models/base_nn.py
+++ b/pyg_spectral/nn/models/base_nn.py
@@ -21,19 +21,18 @@ class BaseNN(nn.Module):
     r"""Base NN structure with MLP before and after convolution layers.
 
     Args:
-        conv (str): Name of :class:`pyg_spectral.nn.conv` module.
-        num_hops (int): Total number of conv hops.
-        in_channels (int): Size of each input sample.
-        hidden_channels (int): Size of each hidden sample.
-        out_channels (int): Size of each output sample.
-        in_layers (int): Number of MLP layers before conv.
-        out_layers (int): Number of MLP layers after conv.
-        dropout_lin (float, optional): Dropout probability for both MLPs.
-        dropout_conv (float, optional): Dropout probability before conv.
+        conv: Name of :class:`pyg_spectral.nn.conv` module.
+        num_hops: Total number of conv hops.
+        in_channels: Size of each input sample.
+        hidden_channels: Size of each hidden sample.
+        out_channels: Size of each output sample.
+        in_layers: Number of MLP layers before conv.
+        out_layers: Number of MLP layers after conv.
+        dropout_lin: Dropout probability for both MLPs.
+        dropout_conv: Dropout probability before conv.
         act, act_first, act_kwargs, norm, norm_kwargs, plain_last, bias:
-            args for :class:`pyg.nn.models.MLP`.
-        lib_conv (str, optional): Parent module library other than
-            :class:`pyg_spectral.nn.conv`.
+            args for :class:`torch_geometric.nn.models.MLP`.
+        lib_conv: Parent module library other than :class:`pyg_spectral.nn.conv`.
         **kwargs: Additional arguments of :class:`pyg_spectral.nn.conv`.
     """
     supports_edge_weight: Final[bool] = False
@@ -168,7 +167,7 @@ def preprocess(self,
             x: Tensor,
             edge_index: Adj
         ) -> Any:
-        r"""Preprocessing step that not counted in forward() overhead.
+        r"""Preprocessing step that is not counted in :meth:`forward()` overhead.
         Here mainly transforming graph adjacency to actual propagation matrix.
         """
         return self.get_propagate_mat(x, edge_index)
@@ -199,18 +198,16 @@ def forward(self,
         ) -> Tensor:
         r"""
         Args:
-            x (Tensor), edge_index (Adj): from pyg.data.Data
-            batch (Tensor, optional): The batch vector
+            x, edge_index: from :class:`torch_geometric.data.Data`
+            batch: The batch vector
                 :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns
                 each element to a specific example.
                 Only needs to be passed in case the underlying normalization
                 layers require the :obj:`batch` information.
-                (default: :obj:`None`)
-            batch_size (int, optional): The number of examples :math:`B`.
+            batch_size: The number of examples :math:`B`.
                 Automatically calculated if not given.
                 Only needs to be passed in case the underlying normalization
                 layers require the :obj:`batch` information.
-                (default: :obj:`None`)
         """
         if self.in_layers > 0:
             x = self.in_mlp(x, batch=batch, batch_size=batch_size)
@@ -224,29 +221,27 @@
 
 class BaseNNCompose(BaseNN):
     r"""Base NN structure with multiple conv channels.
 
     Args:
-        combine (str): How to combine different channels of convs. (one of
-            "sum", "sum_weighted", "cat").
-        --- BaseNN Args ---
-        conv (str): Name of :class:`pyg_spectral.nn.conv` module.
-        num_hops (int): Total number of conv hops.
-        in_channels (int): Size of each input sample.
-        hidden_channels (int): Size of each hidden sample.
-        out_channels (int): Size of each output sample.
-        in_layers (int): Number of MLP layers before conv.
-        out_layers (int): Number of MLP layers after conv.
-        dropout_lin (float, optional): Dropout probability for both MLPs.
-        dropout_conv (float, optional): Dropout probability before conv.
+        combine (str): How to combine different channels of convs (:obj:`sum`,
+            :obj:`sum_weighted`, or :obj:`cat`).
+        conv: Name of :class:`pyg_spectral.nn.conv` module.
+        num_hops: Total number of conv hops.
+        in_channels: Size of each input sample.
+        hidden_channels: Size of each hidden sample.
+        out_channels: Size of each output sample.
+        in_layers: Number of MLP layers before conv.
+        out_layers: Number of MLP layers after conv.
+        dropout_lin: Dropout probability for both MLPs.
+        dropout_conv: Dropout probability before conv.
         act, act_first, act_kwargs, norm, norm_kwargs, plain_last, bias:
-            args for :class:`pyg.nn.models.MLP`.
-        lib_conv (str, optional): Parent module library other than
-            :class:`pyg_spectral.nn.conv`.
+            args for :class:`torch_geometric.nn.models.MLP`.
+        lib_conv: Parent module library other than :class:`pyg_spectral.nn.conv`.
+        **kwargs: Additional arguments of :class:`pyg_spectral.nn.conv`.
     """
 
     def init_channel_list(self, conv: str, in_channels: int, hidden_channels: int,
                           out_channels: int, **kwargs) -> List[int]:
         """
-        self.channel_list: width for each conv channel
+        Attributes:
+            channel_list: width for each conv channel
         """
         self.combine = kwargs.pop('combine', 'sum')
         n_conv = len(conv.split(','))
@@ -317,7 +312,7 @@ def preprocess(self,
             x: Tensor,
             edge_index: Adj
         ) -> Any:
-        r"""Preprocessing step that not counted in forward() overhead.
+        r"""Preprocessing step that is not counted in :meth:`forward()` overhead.
         Here mainly transforming graph adjacency to actual propagation matrix.
         """
         return [f(x, edge_index) for f in self.get_propagate_mat()]
diff --git a/pyg_spectral/nn/models/cpp_comp.py b/pyg_spectral/nn/models/cpp_comp.py
index 7c443d9..a84f3c1 100644
--- a/pyg_spectral/nn/models/cpp_comp.py
+++ b/pyg_spectral/nn/models/cpp_comp.py
@@ -7,6 +7,10 @@
 
 
 class CppCompFixed(PrecomputedFixed):
+    r"""Decoupled structure with C++ propagation precomputation.
+        Fixed scalar propagation parameters and accumulating precompute results.
+    """
+
     def preprocess(self,
             x: Tensor,
             edge_index: Adj
diff --git a/pyg_spectral/nn/models/decoupled.py b/pyg_spectral/nn/models/decoupled.py
index 78a83fb..fe93fe8 100644
--- a/pyg_spectral/nn/models/decoupled.py
+++ b/pyg_spectral/nn/models/decoupled.py
@@ -113,28 +113,30 @@ def gen_theta(num_hops: int, scheme: str, param: Union[float, List[float]] = Non
 
 class DecoupledFixed(BaseNN):
     r"""Decoupled structure without matrix transformation during propagation.
-    Fixed scalar propagation parameters.
-    NOTE: Apply conv every forward() call. Not to be mixed with :class:`Precomputed` models.
+        Fixed scalar propagation parameters.
+
+    .. note::
+        Apply conv every :meth:`forward()` call.
+        Not to be mixed with :class:`Precomputed` models.
 
     Args:
         theta_scheme (str): Method to generate decoupled parameters.
         theta_param (float, optional): Hyperparameter for the scheme.
-        --- BaseNN Args ---
-        conv (str): Name of :class:`pyg_spectral.nn.conv` module.
-        num_hops (int): Total number of conv hops.
-        in_channels (int): Size of each input sample.
-        hidden_channels (int): Size of each hidden sample.
-        out_channels (int): Size of each output sample.
-        in_layers (int): Number of MLP layers before conv.
-        out_layers (int): Number of MLP layers after conv.
-        dropout_lin (float, optional): Dropout probability for both MLPs.
-        dropout_conv (float, optional): Dropout probability before conv.
+        conv, num_hops, in_channels, hidden_channels, out_channels:
+            args for :class:`BaseNN`
+        in_layers, out_layers, dropout_lin, dropout_conv, lib_conv:
+            args for :class:`BaseNN`
         act, act_first, act_kwargs, norm, norm_kwargs, plain_last, bias:
-            args for :class:`pyg.nn.models.MLP`.
-        lib_conv (str, optional): Parent module library other than
-            :class:`pyg_spectral.nn.conv`.
-        **kwargs (optional): Additional arguments of the
-            :class:`pyg_spectral.nn.conv` module.
+            args for :class:`torch_geometric.nn.models.MLP`.
+        **kwargs: Additional arguments of :class:`pyg_spectral.nn.conv`.
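+
+    Example (an illustrative sketch; the conv name and theta values are
+    hypothetical)::
+
+        model = DecoupledFixed(conv='AdjConv', num_hops=10,
+                               in_channels=64, hidden_channels=64, out_channels=7,
+                               theta_scheme='appr', theta_param=0.1)
+        out = model(data.x, data.edge_index)  # data: torch_geometric.data.Data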
     """
 
     def init_conv(self,
@@ -161,28 +155,22 @@ def init_conv(self,
 
 class DecoupledVar(BaseNN):
     r"""Decoupled structure without matrix transformation during propagation.
-    Learnable scalar propagation parameters.
-    NOTE: Apply conv every forward() call. Not to be mixed with :class:`Precomputed` models.
+        Learnable scalar propagation parameters.
+
+    .. note::
+        Apply conv every :meth:`forward()` call.
+        Not to be mixed with :class:`Precomputed` models.
 
     Args:
         theta_scheme (str): Method to generate decoupled parameters.
         theta_param (float, optional): Hyperparameter for the scheme.
-        --- BaseNN Args ---
-        conv (str): Name of :class:`pyg_spectral.nn.conv` module.
-        num_hops (int): Total number of conv hops.
-        in_channels (int): Size of each input sample.
-        hidden_channels (int): Size of each hidden sample.
-        out_channels (int): Size of each output sample.
-        in_layers (int): Number of MLP layers before conv.
-        out_layers (int): Number of MLP layers after conv.
-        dropout_lin (float, optional): Dropout probability for both MLPs.
-        dropout_conv (float, optional): Dropout probability before conv.
+        conv, num_hops, in_channels, hidden_channels, out_channels:
+            args for :class:`BaseNN`
+        in_layers, out_layers, dropout_lin, dropout_conv, lib_conv:
+            args for :class:`BaseNN`
         act, act_first, act_kwargs, norm, norm_kwargs, plain_last, bias:
-            args for :class:`pyg.nn.models.MLP`.
-        lib_conv (str, optional): Parent module library other than
-            :class:`pyg_spectral.nn.conv`.
-        **kwargs (optional): Additional arguments of the
-            :class:`pyg_spectral.nn.conv` module.
+            args for :class:`torch_geometric.nn.models.MLP`.
+        **kwargs: Additional arguments of :class:`pyg_spectral.nn.conv`.
     """
 
     def init_conv(self,
@@ -219,29 +207,20 @@ def reset_parameters(self):
 
 # ==========
 class DecoupledFixedCompose(BaseNNCompose):
     r"""Decoupled structure without matrix transformation during propagation.
-    Fixed scalar propagation parameters.
+        Fixed scalar propagation parameters.
 
     Args:
         theta_scheme (List[str]): Method to generate decoupled parameters.
         theta_param (List[float], optional): Hyperparameter for the scheme.
-        combine (str): How to combine different channels of convs. (one of
-            "sum", "sum_weighted", "cat").
-        --- BaseNN Args ---
-        conv (str): Name of :class:`pyg_spectral.nn.conv` module.
-        num_hops (int): Total number of conv hops.
-        in_channels (int): Size of each input sample.
-        hidden_channels (int): Size of each hidden sample.
-        out_channels (int): Size of each output sample.
-        in_layers (int): Number of MLP layers before conv.
-        out_layers (int): Number of MLP layers after conv.
-        dropout_lin (float, optional): Dropout probability for both MLPs.
-        dropout_conv (float, optional): Dropout probability before conv.
+        combine: How to combine different channels of convs (:obj:`sum`,
+            :obj:`sum_weighted`, or :obj:`cat`).
+        conv, num_hops, in_channels, hidden_channels, out_channels:
+            args for :class:`BaseNN`
+        in_layers, out_layers, dropout_lin, dropout_conv, lib_conv:
+            args for :class:`BaseNN`
         act, act_first, act_kwargs, norm, norm_kwargs, plain_last, bias:
-            args for :class:`pyg.nn.models.MLP`.
-        lib_conv (str, optional): Parent module library other than
-            :class:`pyg_spectral.nn.conv`.
-        **kwargs (optional): Additional arguments of the
-            :class:`pyg_spectral.nn.conv` module.
+            args for :class:`torch_geometric.nn.models.MLP`.
+        **kwargs: Additional arguments of :class:`pyg_spectral.nn.conv`.
     """
 
     def init_conv(self,
@@ -284,29 +263,20 @@ def init_conv(self,
 
 class DecoupledVarCompose(BaseNNCompose):
     r"""Decoupled structure without matrix transformation during propagation.
-    Learnable scalar propagation parameters.
+        Learnable scalar propagation parameters.
 
     Args:
         theta_scheme (List[str]): Method to generate decoupled parameters.
         theta_param (List[float], optional): Hyperparameter for the scheme.
-        combine (str): How to combine different channels of convs. (one of
-            "sum", "sum_weighted", "cat").
-        --- BaseNN Args ---
-        conv (List[str]): Name of :class:`pyg_spectral.nn.conv` module.
-        num_hops (int): Total number of conv hops.
-        in_channels (int): Size of each input sample.
-        hidden_channels (int): Size of each hidden sample.
-        out_channels (int): Size of each output sample.
-        in_layers (int): Number of MLP layers before conv.
-        out_layers (int): Number of MLP layers after conv.
-        dropout_lin (float, optional): Dropout probability for both MLPs.
-        dropout_conv (float, optional): Dropout probability before conv.
+        combine: How to combine different channels of convs (:obj:`sum`,
+            :obj:`sum_weighted`, or :obj:`cat`).
+        conv, num_hops, in_channels, hidden_channels, out_channels:
+            args for :class:`BaseNN`
+        in_layers, out_layers, dropout_lin, dropout_conv, lib_conv:
+            args for :class:`BaseNN`
         act, act_first, act_kwargs, norm, norm_kwargs, plain_last, bias:
-            args for :class:`pyg.nn.models.MLP`.
-        lib_conv (str, optional): Parent module library other than
-            :class:`pyg_spectral.nn.conv`.
-        **kwargs (optional): Additional arguments of the
-            :class:`pyg_spectral.nn.conv` module.
+            args for :class:`torch_geometric.nn.models.MLP`.
+        **kwargs: Additional arguments of :class:`pyg_spectral.nn.conv`.
     """
 
     def init_conv(self,
diff --git a/pyg_spectral/nn/models/iterative.py b/pyg_spectral/nn/models/iterative.py
index 061b7f1..c262998 100644
--- a/pyg_spectral/nn/models/iterative.py
+++ b/pyg_spectral/nn/models/iterative.py
@@ -10,28 +10,19 @@
 
 class Iterative(BaseNN):
     r"""Iterative structure with matrix transformation each hop of propagation.
 
     Args:
-        bias (bool, optional): whether learn an additive bias in conv.
-        weight_initializer (str, optional): The initializer for the weight
-            matrix (:obj:`"glorot"`, :obj:`"uniform"`, :obj:`"kaiming_uniform"`
+        bias (Optional[bool]): whether to learn an additive bias in conv.
+        weight_initializer (Optional[str]): The initializer for the weight
+            matrix (:obj:`"glorot"`, :obj:`"uniform"`, :obj:`"kaiming_uniform"`,
             or :obj:`None`).
-        bias_initializer (str, optional): The initializer for the bias vector
+        bias_initializer (Optional[str]): The initializer for the bias vector
             (:obj:`"zeros"` or :obj:`None`).
-        --- BaseNN Args ---
-        conv (str): Name of :class:`pyg_spectral.nn.conv` module.
-        num_hops (int): Total number of conv hops.
-        in_channels (int): Size of each input sample.
-        hidden_channels (int): Size of each hidden sample.
-        out_channels (int): Size of each output sample.
-        in_layers (int): Number of MLP layers before conv.
-        out_layers (int): Number of MLP layers after conv.
-        dropout_lin (float, optional): Dropout probability for both MLPs.
-        dropout_conv (float, optional): Dropout probability before conv.
+        conv, num_hops, in_channels, hidden_channels, out_channels:
+            args for :class:`BaseNN`
+        in_layers, out_layers, dropout_lin, dropout_conv, lib_conv:
+            args for :class:`BaseNN`
         act, act_first, act_kwargs, norm, norm_kwargs, plain_last, bias:
-            args for :class:`pyg.nn.models.MLP`.
-        lib_conv (str, optional): Parent module library other than
-            :class:`pyg_spectral.nn.conv`.
-        **kwargs (optional): Additional arguments of the
-            :class:`pyg_spectral.nn.conv` module.
+            args for :class:`torch_geometric.nn.models.MLP`.
+        **kwargs: Additional arguments of :class:`pyg_spectral.nn.conv`.
     """
 
     def init_conv(self,
@@ -67,30 +58,21 @@ class IterativeCompose(BaseNNCompose):
     r"""Iterative structure with matrix transformation each hop of propagation.
 
     Args:
-        bias (bool, optional): whether learn an additive bias in conv.
-        weight_initializer (str, optional): The initializer for the weight
-            matrix (:obj:`"glorot"`, :obj:`"uniform"`, :obj:`"kaiming_uniform"`
+        bias (Optional[bool]): whether to learn an additive bias in conv.
+        weight_initializer (Optional[str]): The initializer for the weight
+            matrix (:obj:`"glorot"`, :obj:`"uniform"`, :obj:`"kaiming_uniform"`,
             or :obj:`None`).
-        bias_initializer (str, optional): The initializer for the bias vector
+        bias_initializer (Optional[str]): The initializer for the bias vector
             (:obj:`"zeros"` or :obj:`None`).
-        combine (str): How to combine different channels of convs. (one of
-            "sum", "sum_weighted", "cat").
-        --- BaseNN Args ---
-        conv (List[str]): Name of :class:`pyg_spectral.nn.conv` module.
-        num_hops (int): Total number of conv hops.
-        in_channels (int): Size of each input sample.
-        hidden_channels (int): Size of each hidden sample.
-        out_channels (int): Size of each output sample.
-        in_layers (int): Number of MLP layers before conv.
-        out_layers (int): Number of MLP layers after conv.
-        dropout_lin (float, optional): Dropout probability for both MLPs.
-        dropout_conv (float, optional): Dropout probability before conv.
+        combine: How to combine different channels of convs (:obj:`sum`,
+            :obj:`sum_weighted`, or :obj:`cat`).
+        conv, num_hops, in_channels, hidden_channels, out_channels:
+            args for :class:`BaseNN`
+        in_layers, out_layers, dropout_lin, dropout_conv, lib_conv:
+            args for :class:`BaseNN`
         act, act_first, act_kwargs, norm, norm_kwargs, plain_last, bias:
-            args for :class:`pyg.nn.models.MLP`.
-        lib_conv (str, optional): Parent module library other than
-            :class:`pyg_spectral.nn.conv`.
-        **kwargs (optional): Additional arguments of the
-            :class:`pyg_spectral.nn.conv` module.
+            args for :class:`torch_geometric.nn.models.MLP`.
+        **kwargs: Additional arguments of :class:`pyg_spectral.nn.conv`.
     """
 
     def init_conv(self,
diff --git a/pyg_spectral/nn/models/precomputed.py b/pyg_spectral/nn/models/precomputed.py
index a5a295c..c3af4a9 100644
--- a/pyg_spectral/nn/models/precomputed.py
+++ b/pyg_spectral/nn/models/precomputed.py
@@ -8,27 +8,31 @@
 
 class PrecomputedFixed(DecoupledFixed):
     r"""Decoupled structure with precomputation separating propagation from transformation.
-    Fixed scalar propagation parameters and accumulating precompute results.
-    NOTE: Only apply propagation in `convolute()`. Not to be mixed with :class:`Decoupled` models.
+        Fixed scalar propagation parameters and accumulating precompute results.
+
+    .. note::
+        Only apply propagation in :meth:`convolute()`.
+        Not to be mixed with :class:`Decoupled` models.
 
     Args:
         theta_scheme (str): Method to generate decoupled parameters.
-        theta_param (float, optional): Hyperparameter for the scheme.
-        --- BaseNN Args ---
-        conv (str): Name of :class:`pyg_spectral.nn.conv` module.
-        num_hops (int): Total number of conv hops.
-        in_channels (int): Size of each input sample.
-        hidden_channels (int): Size of each hidden sample.
-        out_channels (int): Size of each output sample.
-        out_layers (int): Number of MLP layers after conv.
-        dropout_lin (float, optional): Dropout probability for both MLPs.
-        dropout_conv (float, optional): Dropout probability before conv.
+        theta_param (Optional[float]): Hyperparameter for the scheme.
+        conv, num_hops, in_channels, hidden_channels, out_channels:
+            args for :class:`BaseNN`
+        in_layers, out_layers, dropout_lin, dropout_conv, lib_conv:
+            args for :class:`BaseNN`
         act, act_first, act_kwargs, norm, norm_kwargs, plain_last, bias:
-            args for :class:`pyg.nn.models.MLP`.
-        lib_conv (str, optional): Parent module library other than
-            :class:`pyg_spectral.nn.conv`.
-        **kwargs (optional): Additional arguments of the
-            :class:`pyg_spectral.nn.conv` module.
+            args for :class:`torch_geometric.nn.models.MLP`.
+        **kwargs: Additional arguments of :class:`pyg_spectral.nn.conv`.
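+
+    Example (an illustrative sketch; the conv name and theta values are
+    hypothetical)::
+
+        model = PrecomputedFixed(conv='AdjConv', num_hops=10,
+                                 in_channels=64, hidden_channels=64, out_channels=7,
+                                 theta_scheme='appr', theta_param=0.1)
+        embed = model.convolute(data.x, data.edge_index)  # propagate once
+        out = model(embed)                                # MLP-only forward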
     """
 
    def __init__(self, in_layers: Optional[int] = None, **kwargs):
@@ -40,7 +35,8 @@ def convolute(self,
             edge_index: Adj,
         ) -> Tensor:
         r"""Decoupled propagation step for calling the convolutional module.
-        Requires no variable transformation in conv.forward().
+        Requires no variable transformation in :meth:`conv.forward()`.
+
         Returns:
             embed (Tensor): Precomputed node embeddings.
         """
@@ -56,18 +52,8 @@ def forward(self,
         ) -> Tensor:
         r"""
         Args:
-            x (Tensor): the output `embed` from `convolute()`.
-            batch (Tensor, optional): The batch vector
-                :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns
-                each element to a specific example.
-                Only needs to be passed in case the underlying normalization
-                layers require the :obj:`batch` information.
-                (default: :obj:`None`)
-            batch_size (int, optional): The number of examples :math:`B`.
-                Automatically calculated if not given.
-                Only needs to be passed in case the underlying normalization
-                layers require the :obj:`batch` information.
-                (default: :obj:`None`)
+            x: the output :obj:`embed` from :meth:`convolute()`.
+            batch, batch_size: Args for :class:`BaseNN`
         """
         if self.out_layers > 0:
             x = self.out_mlp(x, batch=batch, batch_size=batch_size)
@@ -76,27 +62,22 @@ def forward(self,
 
 class PrecomputedVar(DecoupledVar):
     r"""Decoupled structure with precomputation separating propagation from transformation.
-    Learnable scalar propagation parameters and storing all intermediate precompute results.
-    NOTE: Only apply propagation in `convolute()`. Not to be mixed with :class:`Decoupled` models.
+        Learnable scalar propagation parameters and storing all intermediate precompute results.
+
+    .. note::
+        Only apply propagation in :meth:`convolute()`.
+        Not to be mixed with :class:`Decoupled` models.
 
     Args:
         theta_scheme (str): Method to generate decoupled parameters.
-        theta_param (float, optional): Hyperparameter for the scheme.
-        --- BaseNN Args ---
-        conv (str): Name of :class:`pyg_spectral.nn.conv` module.
-        num_hops (int): Total number of conv hops.
-        in_channels (int): Size of each input sample.
-        hidden_channels (int): Size of each hidden sample.
-        out_channels (int): Size of each output sample.
-        out_layers (int): Number of MLP layers after conv.
-        dropout_lin (float, optional): Dropout probability for both MLPs.
-        dropout_conv (float, optional): Dropout probability before conv.
+        theta_param (Optional[float]): Hyperparameter for the scheme.
+        conv, num_hops, in_channels, hidden_channels, out_channels:
+            args for :class:`BaseNN`
+        in_layers, out_layers, dropout_lin, dropout_conv, lib_conv:
+            args for :class:`BaseNN`
         act, act_first, act_kwargs, norm, norm_kwargs, plain_last, bias:
-            args for :class:`pyg.nn.models.MLP`.
-        lib_conv (str, optional): Parent module library other than
-            :class:`pyg_spectral.nn.conv`.
-        **kwargs (optional): Additional arguments of the
-            :class:`pyg_spectral.nn.conv` module.
+            args for :class:`torch_geometric.nn.models.MLP`.
+        **kwargs: Additional arguments of :class:`pyg_spectral.nn.conv`.
     """
     def __init__(self, in_layers: Optional[int] = None, **kwargs):
         assert in_layers is None or in_layers == 0, "PrecomputedVar does not support in_layers."
@@ -107,10 +88,11 @@ def convolute(self,
             edge_index: Adj,
         ) -> list:
         r"""Decoupled propagation step for calling the convolutional module.
-        `self._forward()` should not contain derivable computations.
+        :meth:`_forward()` should not contain derivable computations.
+
         Returns:
-            embed (Tensor): List of precomputed node embeddings of each hop.
-                Each shape is :math:`(|\mathcal{V}|, F, len(convs)+1)`.
+            embed: List of precomputed node embeddings of each hop.
+                Each shape is :math:`(|\mathcal{V}|, F, |convs|+1)`.
         """
         conv_mat = self.get_forward_mat(x, edge_index, comp_scheme='convolute')
@@ -131,18 +113,8 @@ def forward(self,
         ) -> Tensor:
         r"""
         Args:
-            x (Tensor): the output `embed` from `convolute()`.
-            batch (Tensor, optional): The batch vector
-                :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns
-                each element to a specific example.
-                Only needs to be passed in case the underlying normalization
-                layers require the :obj:`batch` information.
-                (default: :obj:`None`)
-            batch_size (int, optional): The number of examples :math:`B`.
-                Automatically calculated if not given.
-                Only needs to be passed in case the underlying normalization
-                layers require the :obj:`batch` information.
-                (default: :obj:`None`)
+            xs: the output :obj:`embed` from :meth:`convolute()`.
+            batch, batch_size: Args for :class:`BaseNN`
         """
         conv_mat = self.get_forward_mat(xs[..., 0], None, comp_scheme='forward')
         for k, conv in enumerate(self.convs):
@@ -159,29 +131,20 @@ def forward(self,
 
 # ==========
 class PrecomputedFixedCompose(DecoupledFixedCompose):
     r"""Decoupled structure with precomputation separating propagation from transformation.
-    Fixed scalar propagation parameters and accumulating precompute results.
+        Fixed scalar propagation parameters and accumulating precompute results.
 
     Args:
         theta_scheme (List[str]): Method to generate decoupled parameters.
         theta_param (List[float], optional): Hyperparameter for the scheme.
-        combine (str): How to combine different channels of convs. (one of
-            "sum", "sum_weighted", "cat").
-        --- BaseNN Args ---
-        conv (str): Name of :class:`pyg_spectral.nn.conv` module.
-        num_hops (int): Total number of conv hops.
-        in_channels (int): Size of each input sample.
-        hidden_channels (int): Size of each hidden sample.
-        out_channels (int): Size of each output sample.
-        in_layers (int): Number of MLP layers before conv.
-        out_layers (int): Number of MLP layers after conv.
-        dropout_lin (float, optional): Dropout probability for both MLPs.
-        dropout_conv (float, optional): Dropout probability before conv.
+        combine: How to combine different channels of convs (:obj:`sum`,
+            :obj:`sum_weighted`, or :obj:`cat`).
+        conv, num_hops, in_channels, hidden_channels, out_channels:
+            args for :class:`BaseNN`
+        in_layers, out_layers, dropout_lin, dropout_conv, lib_conv:
+            args for :class:`BaseNN`
         act, act_first, act_kwargs, norm, norm_kwargs, plain_last, bias:
-            args for :class:`pyg.nn.models.MLP`.
-        lib_conv (str, optional): Parent module library other than
-            :class:`pyg_spectral.nn.conv`.
-        **kwargs (optional): Additional arguments of the
-            :class:`pyg_spectral.nn.conv` module.
+            args for :class:`torch_geometric.nn.models.MLP`.
+        **kwargs: Additional arguments of :class:`pyg_spectral.nn.conv`.
     """
 
     def convolute(self,
@@ -189,10 +152,10 @@ def convolute(self,
             edge_index: Adj,
         ) -> Tensor:
         r"""Decoupled propagation step for calling the convolutional module.
-        Requires no variable transformation in conv.forward().
+        Requires no variable transformation in :meth:`conv.forward()`.
+
         Returns:
-            embed (Tensor): Precomputed node embeddings.
-                Shape: :math:`(|\mathcal{V}|, F, Q)`.
+            embed (Tensor): Precomputed node embeddings. (shape: :math:`(|\mathcal{V}|, F, Q)`)
         """
         out = []
         conv_mats = self.get_forward_mat()
@@ -210,18 +173,8 @@ def forward(self,
         ) -> Tensor:
         r"""
         Args:
-            x (Tensor): the output `embed` from `convolute()`.
-            batch (Tensor, optional): The batch vector
-                :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns
-                each element to a specific example.
-                Only needs to be passed in case the underlying normalization
-                layers require the :obj:`batch` information.
-                (default: :obj:`None`)
-            batch_size (int, optional): The number of examples :math:`B`.
-                Automatically calculated if not given.
-                Only needs to be passed in case the underlying normalization
-                layers require the :obj:`batch` information.
-                (default: :obj:`None`)
+            x: the output :obj:`embed` from :meth:`convolute()`.
+            batch, batch_size: Args for :class:`BaseNN`
         """
         out = None
         for i, channel in enumerate(self.convs):
@@ -242,29 +195,20 @@ def forward(self,
 
 class PrecomputedVarCompose(DecoupledVarCompose):
     r"""Decoupled structure with precomputation separating propagation from transformation.
-    Learnable scalar propagation parameters and storing all intermediate precompute results.
+        Learnable scalar propagation parameters and storing all intermediate precompute results.
 
     Args:
         theta_scheme (List[str]): Method to generate decoupled parameters.
         theta_param (List[float], optional): Hyperparameter for the scheme.
-        combine (str): How to combine different channels of convs. (one of
-            "sum", "sum_weighted", "cat").
-        --- BaseNN Args ---
-        conv (List[str]): Name of :class:`pyg_spectral.nn.conv` module.
-        num_hops (int): Total number of conv hops.
-        in_channels (int): Size of each input sample.
-        hidden_channels (int): Size of each hidden sample.
-        out_channels (int): Size of each output sample.
-        in_layers (int): Number of MLP layers before conv.
-        out_layers (int): Number of MLP layers after conv.
-        dropout_lin (float, optional): Dropout probability for both MLPs.
-        dropout_conv (float, optional): Dropout probability before conv.
+        combine: How to combine different channels of convs (:obj:`sum`,
+            :obj:`sum_weighted`, or :obj:`cat`).
+        conv, num_hops, in_channels, hidden_channels, out_channels:
+            args for :class:`BaseNN`
+        in_layers, out_layers, dropout_lin, dropout_conv, lib_conv:
+            args for :class:`BaseNN`
         act, act_first, act_kwargs, norm, norm_kwargs, plain_last, bias:
-            args for :class:`pyg.nn.models.MLP`.
-        lib_conv (str, optional): Parent module library other than
-            :class:`pyg_spectral.nn.conv`.
-        **kwargs (optional): Additional arguments of the
-            :class:`pyg_spectral.nn.conv` module.
+            args for :class:`torch_geometric.nn.models.MLP`.
+        **kwargs: Additional arguments of :class:`pyg_spectral.nn.conv`.
     """
 
     def convolute(self,
@@ -272,10 +216,11 @@ def convolute(self,
             edge_index: Adj,
         ) -> Tensor:
         r"""Decoupled propagation step for calling the convolutional module.
-        Requires no variable transformation in conv.forward().
+        Requires no variable transformation in :meth:`conv.forward()`.
+
         Returns:
             embed (Tensor): List of precomputed node embeddings of each hop.
-                Shape: :math:`(|\mathcal{V}|, F, Q, len(convs)+1)`.
+                Shape: :math:`(|\mathcal{V}|, F, Q, |convs|+1)`.
         """
         out = []
         conv_mats = self.get_forward_mat()
@@ -300,18 +245,8 @@ def forward(self,
         ) -> Tensor:
         r"""
         Args:
-            x (Tensor): the output `embed` from `convolute()`.
-            batch (Tensor, optional): The batch vector
-                :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns
-                each element to a specific example.
-                Only needs to be passed in case the underlying normalization
-                layers require the :obj:`batch` information.
-                (default: :obj:`None`)
-            batch_size (int, optional): The number of examples :math:`B`.
-                Automatically calculated if not given.
-                Only needs to be passed in case the underlying normalization
-                layers require the :obj:`batch` information.
-                (default: :obj:`None`)
+            x: the output :obj:`embed` from :meth:`convolute()`.
+            batch, batch_size: Args for :class:`BaseNN`
         """
         out = None
         conv_mats = self.get_forward_mat()
diff --git a/pyg_spectral/nn/norm/standard_scale.py b/pyg_spectral/nn/norm/standard_scale.py
index d365315..2853883 100755
--- a/pyg_spectral/nn/norm/standard_scale.py
+++ b/pyg_spectral/nn/norm/standard_scale.py
@@ -9,7 +9,7 @@ class TensorStandardScaler(nn.Module):
     Applies standard Gaussian normalization to :math:`\mathcal{N}(0, 1)`.
 
     Args:
-        dim (int): Dimension to calculate mean and std. Default is 0.
+        dim: Dimension to calculate mean and std.
     """
     def __init__(self, dim: int = 0):
         super(TensorStandardScaler, self).__init__()
@@ -22,7 +22,7 @@ def fit(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
         Compute the mean and std to be used for later scaling.
 
         Args:
-            x (torch.Tensor): Data used to compute the mean and standard deviation
+            x: Data used to compute the mean and standard deviation
 
         Returns:
             var_mean (Tuple[torch.Tensor, torch.Tensor]): Tuple of mean and std.
@@ -37,8 +37,8 @@ def forward(self, x: torch.Tensor, with_mean: bool = False) -> torch.Tensor:
         Forward pass.
 
         Args:
-            x (torch.Tensor): The source tensor.
-            with_mean (bool, optional): Whether to center the data before scaling. Defaults to False.
+            x: The source tensor.
+            with_mean: Whether to center the data before scaling.
         """
         if with_mean:
             x -= self.mean
diff --git a/pyg_spectral/profile/efficiency.py b/pyg_spectral/profile/efficiency.py
index bd56b93..93054f1 100755
--- a/pyg_spectral/profile/efficiency.py
+++ b/pyg_spectral/profile/efficiency.py
@@ -126,7 +126,12 @@ def update(self):
 
 
 class ParamNumel(NumFmt):
-    r"""Number of learnable parameters in an nn.Module.
+    r"""Number of learnable parameters in a :class:`torch.nn.Module`.
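+
+    Example (illustrative)::
+
+        counter = ParamNumel(model)  # model: any torch.nn.Module
+        counter.update(model)        # refresh the count after model changes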
""" def __init__(self, module: Module = None): super(ParamNumel, self).__init__(base=10, suffix='') @@ -141,7 +141,7 @@ def update(self, module: Module): class ParamMemory(NumFmt): - r"""Memory usage of parameters in an nn.Module. + r"""Memory usage of parameters in an :class:`torch.nn.Module`. """ def __init__(self, module: Module = None): super(ParamMemory, self).__init__(base=2, suffix='B')