diff --git a/datasets/vggface2.py b/datasets/vggface2.py
index a8b0ae8ef..cbbf87898 100644
--- a/datasets/vggface2.py
+++ b/datasets/vggface2.py
@@ -26,11 +26,11 @@
 from torchvision import transforms
 import torchvision.transforms.functional as FT
 
-from utils import augmentation_utils
 from skimage import transform as trans
 from hawk_eyes.face import RetinaFace
 import kornia.geometry.transform as GT
 
+from utils import augmentation_utils
 import ai8x
 
 
@@ -157,6 +157,8 @@ def __getitem__(self, index):
         if self.mode == 'identification_dr':
             return self.__getitem_identification_dr(index)
 
+        # Will never be reached
+        return None
 
     def __getitem_detection(self, index):
diff --git a/models/ai85net-faceid_112.py b/models/ai85net-faceid_112.py
index c1d86731b..be8a3e19d 100644
--- a/models/ai85net-faceid_112.py
+++ b/models/ai85net-faceid_112.py
@@ -11,11 +11,12 @@
 """
 
 
-import ai8x
-import ai8x_blocks
 from torch import nn
 import torch.nn.functional as F
 
+import ai8x
+import ai8x_blocks
+
 
 class AI85FaceIDNet_112(nn.Module):
     """
@@ -28,7 +29,7 @@ def __init__(  # pylint: disable=too-many-arguments
            bottleneck_settings,
            last_layer_width,
            emb_dimensionality,
-           num_classes,
+           num_classes=None,  # pylint: disable=unused-argument
            avg_pool_size=(7,7),
            num_channels=3,
            dimensions=(112, 112),  # pylint: disable=unused-argument
@@ -38,25 +39,31 @@ def __init__(  # pylint: disable=too-many-arguments
            **kwargs
    ):
        super().__init__()
 
+       # bias = False due to streaming
        self.pre_stage = ai8x.FusedConv2dReLU(num_channels, bottleneck_settings[0][1], 3,
                                              padding=1, stride=pre_layer_stride,
-                                             bias=False, **kwargs)  # bias = False due to streaming
-       self.pre_stage_2 = ai8x.FusedMaxPoolConv2dReLU(bottleneck_settings[0][1], bottleneck_settings[0][1], 3, padding=1, stride=1,
-                                                      pool_size=2, pool_stride=2,
-                                                      bias=False, **kwargs)  # bias = False due to streaming
+                                             bias=False, **kwargs)
+       # bias = False due to streaming
+       self.pre_stage_2 = ai8x.FusedMaxPoolConv2dReLU(bottleneck_settings[0][1],
+                                                      bottleneck_settings[0][1], 3, padding=1,
+                                                      stride=1, pool_size=2, pool_stride=2,
+                                                      bias=False, **kwargs)
 
        self.feature_stage = nn.ModuleList([])
        for setting in bottleneck_settings:
-           self._create_bottleneck_stage(setting, bias, depthwise_bias, reduced_depthwise_bias, **kwargs)
+           self._create_bottleneck_stage(setting, bias, depthwise_bias,
+                                         reduced_depthwise_bias, **kwargs)
 
        self.post_stage = ai8x.FusedConv2dReLU(bottleneck_settings[-1][2], last_layer_width, 1,
                                               padding=0, stride=1, bias=False, **kwargs)
-       self.pre_avg = ai8x.Conv2d(last_layer_width, last_layer_width, 3, padding=1, stride=1, bias = False, **kwargs)
+       self.pre_avg = ai8x.Conv2d(last_layer_width, last_layer_width, 3, padding=1, stride=1,
+                                  bias = False, **kwargs)
        self.avg_pool = ai8x.AvgPool2d(avg_pool_size, stride=1)
        self.linear = ai8x.Linear(last_layer_width, emb_dimensionality, bias=bias, **kwargs)
 
-   def _create_bottleneck_stage(self, setting, bias, depthwise_bias, reduced_depthwise_bias, **kwargs):
+   def _create_bottleneck_stage(self, setting, bias, depthwise_bias,
+                                reduced_depthwise_bias, **kwargs):
        """Function to create bottlencek stage.
            Setting format is: [num_repeat, in_channels, out_channels, stride, expansion_factor]
        """
@@ -77,8 +84,8 @@ def _create_bottleneck_stage(self, setting, bias, depthwise_bias, reduced_depthw
                                                                    stride=1,
                                                                    expansion_factor=setting[4],
                                                                    bias=bias,
-                                                                   depthwise_bias=(i%2==0) and depthwise_bias, #Reduce depthwise bias
-                                                                   **kwargs))
+                                                                   depthwise_bias=(i%2==0) and
+                                                                   depthwise_bias, **kwargs))
                else:
                    stage.append(ai8x_blocks.ConvResidualBottleneck(in_channels=setting[2],
                                                                    out_channels=setting[2],
@@ -120,7 +127,8 @@ def ai85faceidnet_112(pretrained=False, **kwargs):
    ]
 
    return AI85FaceIDNet_112(pre_layer_stride=1, bottleneck_settings=bottleneck_settings,
-                            last_layer_width=128, emb_dimensionality=64, avg_pool_size=(7,7), depthwise_bias=True, reduced_depthwise_bias = True, **kwargs)
+                            last_layer_width=128, emb_dimensionality=64, avg_pool_size=(7,7),
+                            depthwise_bias=True, reduced_depthwise_bias = True, **kwargs)
 
 
 models = [
    {
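
For anyone picking up this change, the snippet below is a quick smoke test of the relocated imports and the reformatted ai85faceidnet_112() factory. It is only a sketch, not part of the patch: it assumes it is run from the ai8x-training repository root, that ai8x.set_device() takes (device, simulate, round_avg) positionally as the training scripts pass them, and that the remaining constructor arguments keep their defaults, so the embedding head emits 64-dimensional vectors for 112x112 RGB inputs.

import importlib.util

import torch

import ai8x

# Select the device/quantization context before building any ai8x layers
# (positional arguments assumed: device, simulate, round_avg).
ai8x.set_device(85, False, False)

# The module file name contains a dash, so it cannot be imported with a plain
# `import` statement; load it via importlib, similar to how the training
# scripts discover models dynamically.
spec = importlib.util.spec_from_file_location("ai85net_faceid_112",
                                              "models/ai85net-faceid_112.py")
faceid = importlib.util.module_from_spec(spec)
spec.loader.exec_module(faceid)

model = faceid.ai85faceidnet_112(pretrained=False)
with torch.no_grad():
    emb = model(torch.randn(1, 3, 112, 112))
print(emb.shape)  # expected: torch.Size([1, 64]) given emb_dimensionality=64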