diff --git a/docs/numpy_ml.neural_nets.initializers.rst b/docs/numpy_ml.neural_nets.initializers.rst
index 359be5a..9793c26 100644
--- a/docs/numpy_ml.neural_nets.initializers.rst
+++ b/docs/numpy_ml.neural_nets.initializers.rst
@@ -2,28 +2,28 @@ Initializers
 =============
 
 ``ActivationInitializer``
-------------------------
+--------------------------
 .. autoclass:: numpy_ml.neural_nets.initializers.ActivationInitializer
     :members:
     :undoc-members:
     :inherited-members:
 
 ``OptimizerInitializer``
------------------------
+--------------------------
 .. autoclass:: numpy_ml.neural_nets.initializers.OptimizerInitializer
     :members:
     :undoc-members:
     :inherited-members:
 
 ``SchedulerInitializer``
------------------------
+--------------------------
 .. autoclass:: numpy_ml.neural_nets.initializers.SchedulerInitializer
     :members:
     :undoc-members:
     :inherited-members:
 
 ``WeightInitializer``
----------------------
+------------------------
 .. autoclass:: numpy_ml.neural_nets.initializers.WeightInitializer
     :members:
     :undoc-members:
diff --git a/docs/numpy_ml.utils.distance_metrics.rst b/docs/numpy_ml.utils.distance_metrics.rst
index f3111c2..b763c5a 100644
--- a/docs/numpy_ml.utils.distance_metrics.rst
+++ b/docs/numpy_ml.utils.distance_metrics.rst
@@ -4,21 +4,21 @@ Distance metrics
 Common distance functions.
 
 ``euclidean``
-------------
+---------------
 .. autofunction:: numpy_ml.utils.distance_metrics.euclidean
 
 ``chebyshev``
-------------
+---------------
 .. autofunction:: numpy_ml.utils.distance_metrics.chebyshev
 
 ``hamming``
----------
+-------------
 .. autofunction:: numpy_ml.utils.distance_metrics.hamming
 
 ``manhattan``
------------
+--------------
 .. autofunction:: numpy_ml.utils.distance_metrics.manhattan
 
 ``minkowski``
------------
+--------------
 .. autofunction:: numpy_ml.utils.distance_metrics.minkowski
diff --git a/numpy_ml/neural_nets/layers/layers.py b/numpy_ml/neural_nets/layers/layers.py
index 44e1a3f..198fe13 100644
--- a/numpy_ml/neural_nets/layers/layers.py
+++ b/numpy_ml/neural_nets/layers/layers.py
@@ -169,6 +169,20 @@ def __init__(self, scale=True, dropout_p=0, init="glorot_uniform", optimizer=Non
             within the :meth:`update` method. If None, use the :class:`SGD
             <numpy_ml.neural_nets.optimizers.SGD>` optimizer with default
             parameters. Default is None. Unused.
+
+        Attributes
+        ----------
+        X : list
+            Running list of inputs to the :meth:`forward ` method since the last call to :meth:`update `. Only updated if the `retain_derived` argument was set to True.
+        gradients : dict
+            Unused
+        parameters : dict
+            Unused
+        hyperparameters : dict
+            Dictionary of layer hyperparameters
+        derived_variables : dict
+            Dictionary of any intermediate values computed during
+            forward/backward propagation.
         """  # noqa: E501
         super().__init__(optimizer)
 
@@ -180,6 +194,9 @@ def __init__(self, scale=True, dropout_p=0, init="glorot_uniform", optimizer=Non
     def _init_params(self):
         self.softmax = Dropout(Softmax(), self.dropout_p)
         smdv = self.softmax.derived_variables
+
+        self.gradients = {}
+        self.parameters = {}
         self.derived_variables = {
             "attention_weights": [],
             "dropout_mask": smdv["wrappers"][0]["dropout_mask"],
@@ -364,6 +381,20 @@ def __init__(self, n_out, K=1, init="glorot_uniform", optimizer=None):
             within the :meth:`update` method. If None, use the :class:`SGD
             <numpy_ml.neural_nets.optimizers.SGD>` optimizer with default
             parameters. Default is None.
+
+        Attributes
+        ----------
+        X : list
+            Unused
+        gradients : dict
+            Dictionary of loss gradients with regard to the layer parameters
+        parameters : dict
+            Dictionary of layer parameters
+        hyperparameters : dict
+            Dictionary of layer hyperparameters
+        derived_variables : dict
+            Dictionary of any intermediate values computed during
+            forward/backward propagation.
         """  # noqa: E501
         super().__init__(optimizer)
 
@@ -616,12 +647,28 @@ def __init__(self, act_fn=None, optimizer=None):
             within the :meth:`update` method. If None, use the :class:`SGD
             <numpy_ml.neural_nets.optimizers.SGD>` optimizer with default
             parameters. Default is None.
+
+        Attributes
+        ----------
+        X : list
+            Running list of inputs to the :meth:`forward ` method since the last call to :meth:`update `. Only updated if the `retain_derived` argument was set to True.
+        gradients : dict
+            Unused
+        parameters : dict
+            Unused
+        hyperparameters : dict
+            Dictionary of layer hyperparameters
+        derived_variables : dict
+            Dictionary of any intermediate values computed during
+            forward/backward propagation.
         """  # noqa: E501
         super().__init__(optimizer)
         self.act_fn = ActivationInitializer(act_fn)()
         self._init_params()
 
     def _init_params(self):
+        self.gradients = {}
+        self.parameters = {}
         self.derived_variables = {"sum": []}
 
     @property
@@ -712,12 +759,28 @@ def __init__(self, act_fn=None, optimizer=None):
             within the :meth:`update` method. If None, use the :class:`SGD
             <numpy_ml.neural_nets.optimizers.SGD>` optimizer with default
             parameters. Default is None.
+
+        Attributes
+        ----------
+        X : list
+            Running list of inputs to the :meth:`forward ` method since the last call to :meth:`update `. Only updated if the `retain_derived` argument was set to True.
+        gradients : dict
+            Unused
+        parameters : dict
+            Unused
+        hyperparameters : dict
+            Dictionary of layer hyperparameters
+        derived_variables : dict
+            Dictionary of any intermediate values computed during
+            forward/backward propagation.
         """  # noqa: E501
         super().__init__(optimizer)
         self.act_fn = ActivationInitializer(act_fn)()
         self._init_params()
 
     def _init_params(self):
+        self.gradients = {}
+        self.parameters = {}
         self.derived_variables = {"product": []}
 
     @property
@@ -809,6 +872,20 @@ def __init__(self, keep_dim="first", optimizer=None):
             within the :meth:`update` method. If None, use the :class:`SGD
             <numpy_ml.neural_nets.optimizers.SGD>` optimizer with default
             parameters. Default is None.
+
+        Attributes
+        ----------
+        X : list
+            Unused
+        gradients : dict
+            Unused
+        parameters : dict
+            Unused
+        hyperparameters : dict
+            Dictionary of layer hyperparameters
+        derived_variables : dict
+            Dictionary of any intermediate values computed during
+            forward/backward propagation.
         """  # noqa: E501
         super().__init__(optimizer)
 
@@ -816,7 +893,6 @@ def __init__(self, keep_dim="first", optimizer=None):
         self._init_params()
 
     def _init_params(self):
-        self.X = []
         self.gradients = {}
         self.parameters = {}
         self.derived_variables = {"in_dims": []}
@@ -943,6 +1019,20 @@ def __init__(self, momentum=0.9, epsilon=1e-5, optimizer=None):
             within the :meth:`update` method. If None, use the :class:`SGD
             <numpy_ml.neural_nets.optimizers.SGD>` optimizer with default
             parameters. Default is None.
+
+        Attributes
+        ----------
+        X : list
+            Running list of inputs to the :meth:`forward ` method since the last call to :meth:`update `. Only updated if the `retain_derived` argument was set to True.
+        gradients : dict
+            Dictionary of loss gradients with regard to the layer parameters
+        parameters : dict
+            Dictionary of layer parameters
+        hyperparameters : dict
+            Dictionary of layer hyperparameters
+        derived_variables : dict
+            Dictionary of any intermediate values computed during
+            forward/backward propagation.
         """  # noqa: E501
         super().__init__(optimizer)
 
@@ -1028,7 +1118,7 @@ def forward(self, X, retain_derived=True):
             features for a minibatch of `n_ex` examples.
         retain_derived : bool
             Whether to use the current intput to adjust the running mean and
-            running_var computations. Setting this to True is the same as
+            running_var computations. Setting this to False is the same as
             freezing the layer for the current input. Default is True.
 
         Returns
@@ -1177,6 +1267,20 @@ def __init__(self, momentum=0.9, epsilon=1e-5, optimizer=None):
             within the :meth:`update` method. If None, use the :class:`SGD
             <numpy_ml.neural_nets.optimizers.SGD>` optimizer with default
             parameters. Default is None.
+
+        Attributes
+        ----------
+        X : list
+            Running list of inputs to the :meth:`forward ` method since the last call to :meth:`update `. Only updated if the `retain_derived` argument was set to True.
+        gradients : dict
+            Dictionary of loss gradients with regard to the layer parameters
+        parameters : dict
+            Dictionary of layer parameters
+        hyperparameters : dict
+            Dictionary of layer hyperparameters
+        derived_variables : dict
+            Dictionary of any intermediate values computed during
+            forward/backward propagation.
         """  # noqa: E501
         super().__init__(optimizer)
 
@@ -1368,6 +1472,20 @@ def __init__(self, epsilon=1e-5, optimizer=None):
             within the :meth:`update` method. If None, use the :class:`SGD
             <numpy_ml.neural_nets.optimizers.SGD>` optimizer with default
             parameters. Default is None.
+
+        Attributes
+        ----------
+        X : list
+            Running list of inputs to the :meth:`forward ` method since the last call to :meth:`update `. Only updated if the `retain_derived` argument was set to True.
+        gradients : dict
+            Dictionary of loss gradients with regard to the layer parameters
+        parameters : dict
+            Dictionary of layer parameters
+        hyperparameters : dict
+            Dictionary of layer hyperparameters
+        derived_variables : dict
+            Dictionary of any intermediate values computed during
+            forward/backward propagation.
         """  # noqa: E501
         super().__init__(optimizer)
 
@@ -1543,6 +1661,20 @@ def __init__(self, epsilon=1e-5, optimizer=None):
             within the :meth:`update` method. If None, use the :class:`SGD
             <numpy_ml.neural_nets.optimizers.SGD>` optimizer with default
             parameters. Default is None.
+
+        Attributes
+        ----------
+        X : list
+            Running list of inputs to the :meth:`forward ` method since the last call to :meth:`update `. Only updated if the `retain_derived` argument was set to True.
+        gradients : dict
+            Dictionary of loss gradients with regard to the layer parameters
+        parameters : dict
+            Dictionary of layer parameters
+        hyperparameters : dict
+            Dictionary of layer hyperparameters
+        derived_variables : dict
+            Dictionary of any intermediate values computed during
+            forward/backward propagation.
         """  # noqa: E501
         super().__init__(optimizer)
 
@@ -1710,6 +1842,20 @@ def __init__(
             within the :meth:`update` method. If None, use the :class:`SGD
             <numpy_ml.neural_nets.optimizers.SGD>` optimizer with default
             parameters. Default is None.
+
+        Attributes
+        ----------
+        X : list
+            Running list of inputs to the :meth:`forward ` method since the last call to :meth:`update `. Only updated if the `retain_derived` argument was set to True.
+        gradients : dict
+            Dictionary of loss gradients with regard to the layer parameters
+        parameters : dict
+            Dictionary of layer parameters
+        hyperparameters : dict
+            Dictionary of layer hyperparameters
+        derived_variables : dict
+            Dictionary of any intermediate values computed during
+            forward/backward propagation.
         """  # noqa: E501
         super().__init__(optimizer)
         fstr = "'pool' must be either 'sum', 'mean', or None but got '{}'"
@@ -1891,6 +2037,20 @@ def __init__(self, n_out, act_fn=None, init="glorot_uniform", optimizer=None):
             within the :meth:`update` method. If None, use the :class:`SGD
             <numpy_ml.neural_nets.optimizers.SGD>` optimizer with default
             parameters. Default is None.
+
+        Attributes
+        ----------
+        X : list
+            Running list of inputs to the :meth:`forward ` method since the last call to :meth:`update `. Only updated if the `retain_derived` argument was set to True.
+        gradients : dict
+            Dictionary of loss gradients with regard to the layer parameters
+        parameters : dict
+            Dictionary of layer parameters
+        hyperparameters : dict
+            Dictionary of layer hyperparameters
+        derived_variables : dict
+            Dictionary of any intermediate values computed during
+            forward/backward propagation.
         """  # noqa: E501
         super().__init__(optimizer)
 
@@ -2060,6 +2220,20 @@ def __init__(self, dim=-1, optimizer=None):
             within the :meth:`update` method. If None, use the :class:`SGD
             <numpy_ml.neural_nets.optimizers.SGD>` optimizer with default
             parameters. Default is None. Unused for this layer.
+
+        Attributes
+        ----------
+        X : list
+            Running list of inputs to the :meth:`forward ` method since the last call to :meth:`update `. Only updated if the `retain_derived` argument was set to True.
+        gradients : dict
+            Dictionary of loss gradients with regard to the layer parameters
+        parameters : dict
+            Dictionary of layer parameters
+        hyperparameters : dict
+            Dictionary of layer hyperparameters
+        derived_variables : dict
+            Dictionary of any intermediate values computed during
+            forward/backward propagation.
         """  # noqa: E501
         super().__init__(optimizer)
 
@@ -2218,6 +2392,20 @@ def __init__(
             within the :meth:`update` method. If None, use the :class:`SGD
             <numpy_ml.neural_nets.optimizers.SGD>` optimizer with default
             parameters. Default is None.
+
+        Attributes
+        ----------
+        X : list
+            Running list of inputs to the :meth:`forward ` method since the last call to :meth:`update `. Only updated if the `retain_derived` argument was set to True.
+        gradients : dict
+            Dictionary of loss gradients with regard to the layer parameters
+        parameters : dict
+            Dictionary of layer parameters
+        hyperparameters : dict
+            Dictionary of layer hyperparameters
+        derived_variables : dict
+            Dictionary of any intermediate values computed during
+            forward/backward propagation.
         """  # noqa: E501
         super().__init__(optimizer)
 
@@ -2465,6 +2653,20 @@ def __init__(
             within the :meth:`update` method. If None, use the :class:`SGD
             <numpy_ml.neural_nets.optimizers.SGD>` optimizer with default
             parameters. Default is None.
+
+        Attributes
+        ----------
+        X : list
+            Running list of inputs to the :meth:`forward ` method since the last call to :meth:`update `. Only updated if the `retain_derived` argument was set to True.
+        gradients : dict
+            Dictionary of loss gradients with regard to the layer parameters
+        parameters : dict
+            Dictionary of layer parameters
+        hyperparameters : dict
+            Dictionary of layer hyperparameters
+        derived_variables : dict
+            Dictionary of any intermediate values computed during
+            forward/backward propagation.
         """  # noqa: E501
         super().__init__(optimizer)
 
@@ -2742,6 +2944,20 @@ def __init__(
             within the :meth:`update` method. If None, use the :class:`SGD
             <numpy_ml.neural_nets.optimizers.SGD>` optimizer with default parameters.
             Default is None.
+
+        Attributes
+        ----------
+        X : list
+            Running list of inputs to the :meth:`forward ` method since the last call to :meth:`update `. Only updated if the `retain_derived` argument was set to True.
+        gradients : dict
+            Dictionary of loss gradients with regard to the layer parameters
+        parameters : dict
+            Dictionary of layer parameters
+        hyperparameters : dict
+            Dictionary of layer hyperparameters
+        derived_variables : dict
+            Dictionary of any intermediate values computed during
+            forward/backward propagation.
         """  # noqa: E501
         super().__init__(optimizer)
 
@@ -2770,7 +2986,7 @@ def _init_params(self):
 
     @property
     def hyperparameters(self):
-        """Return a dictionary containing the layer hyperparameters."""
+        """A dictionary containing the layer hyperparameters."""
         return {
             "layer": "Conv2D",
             "pad": self.pad,
@@ -2844,8 +3060,7 @@ def backward(self, dLdy, retain_grads=True):
 
         Parameters
        ----------
-        dLdy : :py:class:`ndarray <numpy.ndarray>` of shape `(n_ex, out_rows,
-            out_cols, out_ch)` or list of arrays
+        dLdy : :py:class:`ndarray <numpy.ndarray>` of shape `(n_ex, out_rows, out_cols, out_ch)` or list of arrays
             The gradient(s) of the loss with respect to the layer output(s).
         retain_grads : bool
             Whether to include the intermediate parameter gradients computed
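
A minimal usage sketch (not part of the patch) of how the attributes documented above can be inspected on a layer. It assumes the FullyConnected layer exported by numpy_ml.neural_nets.layers, whose __init__ signature appears in the hunk at new line 2037; the variable names (layer, X, Y, dX) and the chosen shapes are illustrative only.

import numpy as np
from numpy_ml.neural_nets.layers import FullyConnected

# Sketch only: exercises the attributes described by the new docstrings.
layer = FullyConnected(n_out=4, act_fn="ReLU")

X = np.random.randn(8, 16)                 # minibatch of 8 examples, 16 features each
Y = layer.forward(X, retain_derived=True)  # retain_derived=True appends X to layer.X
dX = layer.backward(np.ones_like(Y), retain_grads=True)

print(layer.hyperparameters)               # dictionary of layer hyperparameters
print(sorted(layer.parameters))            # layer parameters (e.g. weights and biases)
print(sorted(layer.gradients))             # loss gradients with respect to the parameters
print(sorted(layer.derived_variables))     # intermediate values from forward/backward
print(len(layer.X))                        # inputs retained since the last call to update()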