diff --git a/keras_hub/src/models/efficientnet/fusedmbconv.py b/keras_hub/src/models/efficientnet/fusedmbconv.py
index 8d2cc2fdef..51a7f95fef 100644
--- a/keras_hub/src/models/efficientnet/fusedmbconv.py
+++ b/keras_hub/src/models/efficientnet/fusedmbconv.py
@@ -47,6 +47,9 @@ class FusedMBConvBlock(keras.layers.Layer):
         se_ratio: default 0.0, The filters used in the Squeeze-Excitation phase,
             and are chosen as the maximum between 1 and input_filters*se_ratio
         batch_norm_momentum: default 0.9, the BatchNormalization momentum
+        batch_norm_epsilon: default 1e-3, float, epsilon for batch norm
+            calculations. Used in the denominator to prevent divide by
+            zero errors.
         activation: default "swish", the activation function used between
             convolution operations
         dropout: float, the optional dropout rate to apply before the output
diff --git a/keras_hub/src/models/efficientnet/mbconv.py b/keras_hub/src/models/efficientnet/mbconv.py
index e9acbfeb9a..b4dc05f7c9 100644
--- a/keras_hub/src/models/efficientnet/mbconv.py
+++ b/keras_hub/src/models/efficientnet/mbconv.py
@@ -62,6 +62,9 @@ def __init__(
             is above 0. The filters used in this phase are chosen as the
             maximum between 1 and input_filters*se_ratio
         batch_norm_momentum: default 0.9, the BatchNormalization momentum
+        batch_norm_epsilon: default 1e-3, float, epsilon for batch norm
+            calculations. Used in the denominator to prevent divide by
+            zero errors.
         activation: default "swish", the activation function used between
             convolution operations
         dropout: float, the optional dropout rate to apply before the output
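
For context, here is a minimal sketch (not part of the diff) of how a `batch_norm_epsilon` value like the documented default would typically reach `keras.layers.BatchNormalization`, which both blocks wrap. The momentum and epsilon values are the defaults from the docstrings above; the surrounding wiring inside `MBConvBlock` and `FusedMBConvBlock` is assumed rather than copied from the source files.

```python
import keras
import numpy as np

# Defaults documented in the docstrings added by this diff.
batch_norm_momentum = 0.9
batch_norm_epsilon = 1e-3

bn = keras.layers.BatchNormalization(
    momentum=batch_norm_momentum,
    # epsilon is added to the batch variance before the square root, so the
    # normalization never divides by zero, even for constant-valued channels.
    epsilon=batch_norm_epsilon,
)

# Constant input: per-channel variance is exactly 0, which would otherwise
# produce a division by zero during normalization.
x = np.zeros((8, 32, 32, 16), dtype="float32")
y = bn(x, training=True)  # finite output thanks to the epsilon term
print(y.shape)  # (8, 32, 32, 16)
```

Documenting the epsilon next to `batch_norm_momentum` keeps both BatchNormalization hyperparameters visible in one place, which is presumably the intent of the change.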