Why are the params calculated by get_flops.py not 100%? #8800
-
Does anyone know?
-
I found that DCN is the cause. When I don't add DCN:

```python
_base_ = './dw_r50_fpn_1x_coco.py'
# Test whether DCN has a negative effect
model = dict(
    type='FCOS',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='torchvision://resnet50')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs='on_output',
        num_outs=5,
        relu_before_extra_convs=True),
    bbox_head=dict(
        type='DWHead',
        num_classes=352,
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        strides=[8, 16, 32, 64, 128],
        loss_bbox=dict(type='GIoULoss', loss_weight=1.0)),
    train_cfg=None,
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.6),
        max_per_img=100,
        with_nms=True)
)
dataset_type = 'VOCDataset'
data_root = '/gpfs/home/sist/tqzouustc/dataset/QMUL-OpenLogo/'
classes = ('microsoft', 'wordpress', 'bershka', 'yonex', 'nissan', 'allianz_text', 'optus_yes', 'visa', 'evernote', 'barbie', 'michelin', 'aldi', 'sunchips', 'mcdonalds_text', 'pepsi_text1', 'mercedesbenz_text', 'recycling', 'americanexpress', 'supreme', 'bayer', 'spiderman', 'guinness', 'bacardi', 'uniqlo1', 'unitednations', 'honda', 'lays', 'generalelectric', 'bridgestone', 'batman', 'kitkat', 'schwinn', 'adidas', 'ec', 'verizon', 'budweiser', 'lg', 'coke', 'ford', 'xbox', 'ibm', 'wellsfargo', 'mccafe', 'bankofamerica', 'santander', 'toyota', 'budweiser_text', 'oracle', 'base', 'ups', 'disney', 'soundcloud', 'rolex', 'chevrolet', 'chiquita', 'head_text', 'chevron', 'amazon', 'nike', 'standard_liege', 'maxxis', 'kfc', 'cvs', 'abus', 'chevrolet_text', 'heineken_text', 'hyundai', 'soundrop', 'hyundai_text', 'aral', 'cocacola', 'kia', 'esso', 'olympics', 'colgate', 'cpa_australia', 'hersheys', 'twitter', 'youtube', 'head', 'reebok_text', 'fritos', 'citroen', 'bbva', 'prada', 'canon', 'wii', 'axa', 'ikea', 'intel', 'cvspharmacy', 'drpepper', 'gap', 'maserati', 'huawei_text', 'tacobell', 't-mobile', 'burgerking', 'heineken', 'opel', 'bem', 'reeses', 'tnt', 'siemens', 'velveeta', 'sega', 'volkswagen_text', 'bionade', 'sony', 'calvinklein', 'bridgestone_text', 'benrus', 'firelli', 'apple', 'fosters', 'armitron', 'marlboro_text', 'hanes', 'bosch', 'chimay', 'lacoste_text', 'becks', 'carters', 'lv', 'mini', 'timberland', 'citi', 'jagermeister', 'pizzahut', 'bankofamerica_text', 'bik', 'aspirin', 'corona', 'lamborghini', 'republican', 'nissan_text', 'dexia', 'walmart_text', 'basf', 'texaco', 'nbc', 'vaio', 'aquapac_text', 'hm', 'warnerbros', 'hsbc_text', 'sprite', 'johnnywalker', 'uniqlo', 'venus', 'bosch_text', 'hsbc', 'nasa', 'shell', 'spar', 'audi_text', 'aldi_text', 'poloralphlauren', 'lotto', 'cartier', 'lego', 'hp', 'unicef', 'yahoo', 'jacobscreek', 'kelloggs', 'chickfila', 'marlboro_fig', 'windows', 'subway', 'bottegaveneta', 'internetexplorer', 'comedycentral', 'nescafe', 'londonunderground', 'vodafone', 'planters', 'select', 'apc', 'homedepot', 'chanel', 'adidas_text', 'tigerwash', 'costa', 'subaru', 'mcdonalds', 'ebay', 'yonex_text', 'fly_emirates', 'stellaartois', 'rbc', 'armani', 'redbull_text', 'audi', 'doritos', 'obey', 'volvo', 'dunkindonuts', 'skechers', 'honda_text', 'loreal', 'pepsi_text', 'boeing', 'bellataylor', 'verizon_text', 'espn', 'aluratek', 'at_and_t', 'scion_text', 'amcrest_text', 'jackinthebox', 'netflix', 'kraft', 'zara', 'superman', 'google', 'target', 'corona_text', 'umbro', 'goodyear', 'spar_text', 'danone', 'playstation', 'maxwellhouse', 'pampers', 'citroen_text', 'bmw', 'pizzahut_hut', 'pepsi', 'singha', 'mobil', 'underarmour', 'shell_text1', 'hisense', 'optus', 'fritolay', 'ferrari', 'toyota_text', 'bulgari', 'mercedesbenz', 'asus', 'esso_text', 'boeing_text', 'airness', 'porsche_text', 'nvidia', 'suzuki', 'northface', 'walmart', 'millerhighlife', 'bellodigital', 'wellsfargo_text', 'quick', 'kodak', 'porsche', 'mastercard', 'nb', 'fedex', 'coach', 'shell_text', 'medibank', 'infiniti_text', 'philadelphia', 'marlboro', 'allianz', 'miraclewhip', 'infiniti', 'nivea', 'android', 'santander_text', 'cheetos', 'hh', 'puma_text', 'jello', 'teslamotors', 'tsingtao', 'williamhill', 'chanel_text', 'tostitos', 'reebok', 'bbc', 'samsung', 'sap', 'gildan', 'heraldsun', 'reebok1', 'ruffles', 'erdinger', 'huawei', 'dhl', 'anz', 'redbull', 'cisco', 'blackmores', 'lacoste', 'panasonic', 'rolex_text', '3m', 'volkswagen', 'tommyhilfiger', 'tissot', 'us_president', 'jcrew', 
'alfaromeo', 'mitsubishi', 'starbucks', 'hermes', 'bfgoodrich', 'facebook', 'carlsberg', 'renault', 'mk', 'apecase', 'bellodigital_text', 'firefox', 'jurlique', 'burgerking_text', 'luxottica', 'adidas1', 'motorola', 'lexus', 'lexus_text', 'nike_text', 'barclays', 'milka', 'amcrest', 'nintendo', 'athalon', 'target_text', 'anz_text', 'costco', 'homedepot_text', 'mtv', 'rittersport', 'converse', 'aluratek_text', 'paulaner', 'americanexpress_text', 'airhawk', 'gucci', 'thomsonreuters', 'yamaha', 'caterpillar', 'accenture', 'allett', 'levis', 'total', 'philips', 'gillette', 'puma', 'carglass', 'blizzardentertainment',)
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1000, 600), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1000, 600),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type='RepeatDataset',
        times=3,
        dataset=dict(
            type=dataset_type,
            classes=classes,
            ann_file=[
                data_root + 'VOC2007/ImageSets/Main/trainval.txt'#,
                #data_root + 'VOC2012/ImageSets/Main/trainval.txt'
            ],
            #img_prefix=[data_root + 'VOC2007/', data_root + 'VOC2012/'],
            img_prefix=[data_root + 'VOC2007/'],
            pipeline=train_pipeline)),
    val=dict(
        type=dataset_type,
        classes=classes,
        ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt',
        img_prefix=data_root + 'VOC2007/',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        classes=classes,
        ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt',
        img_prefix=data_root + 'VOC2007/',
        pipeline=test_pipeline))
evaluation = dict(interval=1, metric='mAP')
lr_config = dict(step=[8, 11])
runner = dict(type='EpochBasedRunner', max_epochs=12)
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])
custom_imports = dict(
    imports=[
        'dw_head'
    ])
checkpoint_config = dict(interval=1)
```

The params calculation seems to be right:

```
FCOS(
  32.717 M, 100.000% Params, 133.213 GFLOPs, 100.000% FLOPs,
  (backbone): ResNet(
    23.283 M, 71.163% Params, 51.119 GFLOPs, 38.374% FLOPs,
    (conv1): Conv2d(0.0 M, 0.000% Params, 1.464 GFLOPs, 1.099% FLOPs, 3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
    (bn1): BatchNorm2d(0.0 M, 0.000% Params, 0.02 GFLOPs, 0.015% FLOPs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (relu): ReLU(0.0 M, 0.000% Params, 0.01 GFLOPs, 0.007% FLOPs, inplace=True)
    (maxpool): MaxPool2d(0.0 M, 0.000% Params, 0.01 GFLOPs, 0.007% FLOPs, kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)
    (layer1): ResLayer(
      0.0 M, 0.000% Params, 8.442 GFLOPs, 6.337% FLOPs,
      (0): Bottleneck(
        0.0 M, 0.000% Params, 2.934 GFLOPs, 2.202% FLOPs,
        (conv1): Conv2d(0.0 M, 0.000% Params, 0.159 GFLOPs, 0.120% FLOPs, 64, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn1): BatchNorm2d(0.0 M, 0.000% Params, 0.005 GFLOPs, 0.004% FLOPs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv2): Conv2d(0.0 M, 0.000% Params, 1.434 GFLOPs, 1.077% FLOPs, 64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(0.0 M, 0.000% Params, 0.005 GFLOPs, 0.004% FLOPs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv3): Conv2d(0.0 M, 0.000% Params, 0.638 GFLOPs, 0.479% FLOPs, 64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn3): BatchNorm2d(0.0 M, 0.000% Params, 0.02 GFLOPs, 0.015% FLOPs, 256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (relu): ReLU(0.0 M, 0.000% Params, 0.015 GFLOPs, 0.011% FLOPs, inplace=True)
        (downsample): Sequential(
          0.0 M, 0.000% Params, 0.657 GFLOPs, 0.494% FLOPs,
          (0): Conv2d(0.0 M, 0.000% Params, 0.638 GFLOPs, 0.479% FLOPs, 64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
          (1): BatchNorm2d(0.0 M, 0.000% Params, 0.02 GFLOPs, 0.015% FLOPs, 256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        )
      )
      (1): Bottleneck(
        0.0 M, 0.000% Params, 2.754 GFLOPs, 2.068% FLOPs,
        (conv1): Conv2d(0.0 M, 0.000% Params, 0.638 GFLOPs, 0.479% FLOPs, 256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn1): BatchNorm2d(0.0 M, 0.000% Params, 0.005 GFLOPs, 0.004% FLOPs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv2): Conv2d(0.0 M, 0.000% Params, 1.434 GFLOPs, 1.077% FLOPs, 64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(0.0 M, 0.000% Params, 0.005 GFLOPs, 0.004% FLOPs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv3): Conv2d(0.0 M, 0.000% Params, 0.638 GFLOPs, 0.479% FLOPs, 64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn3): BatchNorm2d(0.0 M, 0.000% Params, 0.02 GFLOPs, 0.015% FLOPs, 256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (relu): ReLU(0.0 M, 0.000% Params, 0.015 GFLOPs, 0.011% FLOPs, inplace=True)
      )
      (2): Bottleneck(
        0.0 M, 0.000% Params, 2.754 GFLOPs, 2.068% FLOPs,
        (conv1): Conv2d(0.0 M, 0.000% Params, 0.638 GFLOPs, 0.479% FLOPs, 256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn1): BatchNorm2d(0.0 M, 0.000% Params, 0.005 GFLOPs, 0.004% FLOPs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv2): Conv2d(0.0 M, 0.000% Params, 1.434 GFLOPs, 1.077% FLOPs, 64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(0.0 M, 0.000% Params, 0.005 GFLOPs, 0.004% FLOPs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (conv3): Conv2d(0.0 M, 0.000% Params, 0.638 GFLOPs, 0.479% FLOPs, 64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (bn3): BatchNorm2d(0.0 M, 0.000% Params, 0.02 GFLOPs, 0.015% FLOPs, 256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (relu): ReLU(0.0 M, 0.000% Params, 0.015 GFLOPs, 0.011% FLOPs, inplace=True)
      )
    )
```

But when I add DCN to the ResNet backbone, I get exactly the same result as above:

```python
_base_ = './dw_r50_fpn_1x_coco.py'
# Test whether DCN has a negative effect
model = dict(
    type='FCOS',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='torchvision://resnet50')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs='on_output',
        num_outs=5,
        relu_before_extra_convs=True),
    bbox_head=dict(
        type='DWHead',
        num_classes=352,
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        strides=[8, 16, 32, 64, 128],
        loss_bbox=dict(type='GIoULoss', loss_weight=1.0)),
    train_cfg=None,
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.6),
        max_per_img=100,
        with_nms=True)
)
dataset_type = 'VOCDataset'
data_root = '/gpfs/home/sist/tqzouustc/dataset/QMUL-OpenLogo/'
classes = ('microsoft', 'wordpress', 'bershka', 'yonex', 'nissan', 'allianz_text', 'optus_yes', 'visa', 'evernote', 'barbie', 'michelin', 'aldi', 'sunchips', 'mcdonalds_text', 'pepsi_text1', 'mercedesbenz_text', 'recycling', 'americanexpress', 'supreme', 'bayer', 'spiderman', 'guinness', 'bacardi', 'uniqlo1', 'unitednations', 'honda', 'lays', 'generalelectric', 'bridgestone', 'batman', 'kitkat', 'schwinn', 'adidas', 'ec', 'verizon', 'budweiser', 'lg', 'coke', 'ford', 'xbox', 'ibm', 'wellsfargo', 'mccafe', 'bankofamerica', 'santander', 'toyota', 'budweiser_text', 'oracle', 'base', 'ups', 'disney', 'soundcloud', 'rolex', 'chevrolet', 'chiquita', 'head_text', 'chevron', 'amazon', 'nike', 'standard_liege', 'maxxis', 'kfc', 'cvs', 'abus', 'chevrolet_text', 'heineken_text', 'hyundai', 'soundrop', 'hyundai_text', 'aral', 'cocacola', 'kia', 'esso', 'olympics', 'colgate', 'cpa_australia', 'hersheys', 'twitter', 'youtube', 'head', 'reebok_text', 'fritos', 'citroen', 'bbva', 'prada', 'canon', 'wii', 'axa', 'ikea', 'intel', 'cvspharmacy', 'drpepper', 'gap', 'maserati', 'huawei_text', 'tacobell', 't-mobile', 'burgerking', 'heineken', 'opel', 'bem', 'reeses', 'tnt', 'siemens', 'velveeta', 'sega', 'volkswagen_text', 'bionade', 'sony', 'calvinklein', 'bridgestone_text', 'benrus', 'firelli', 'apple', 'fosters', 'armitron', 'marlboro_text', 'hanes', 'bosch', 'chimay', 'lacoste_text', 'becks', 'carters', 'lv', 'mini', 'timberland', 'citi', 'jagermeister', 'pizzahut', 'bankofamerica_text', 'bik', 'aspirin', 'corona', 'lamborghini', 'republican', 'nissan_text', 'dexia', 'walmart_text', 'basf', 'texaco', 'nbc', 'vaio', 'aquapac_text', 'hm', 'warnerbros', 'hsbc_text', 'sprite', 'johnnywalker', 'uniqlo', 'venus', 'bosch_text', 'hsbc', 'nasa', 'shell', 'spar', 'audi_text', 'aldi_text', 'poloralphlauren', 'lotto', 'cartier', 'lego', 'hp', 'unicef', 'yahoo', 'jacobscreek', 'kelloggs', 'chickfila', 'marlboro_fig', 'windows', 'subway', 'bottegaveneta', 'internetexplorer', 'comedycentral', 'nescafe', 'londonunderground', 'vodafone', 'planters', 'select', 'apc', 'homedepot', 'chanel', 'adidas_text', 'tigerwash', 'costa', 'subaru', 'mcdonalds', 'ebay', 'yonex_text', 'fly_emirates', 'stellaartois', 'rbc', 'armani', 'redbull_text', 'audi', 'doritos', 'obey', 'volvo', 'dunkindonuts', 'skechers', 'honda_text', 'loreal', 'pepsi_text', 'boeing', 'bellataylor', 'verizon_text', 'espn', 'aluratek', 'at_and_t', 'scion_text', 'amcrest_text', 'jackinthebox', 'netflix', 'kraft', 'zara', 'superman', 'google', 'target', 'corona_text', 'umbro', 'goodyear', 'spar_text', 'danone', 'playstation', 'maxwellhouse', 'pampers', 'citroen_text', 'bmw', 'pizzahut_hut', 'pepsi', 'singha', 'mobil', 'underarmour', 'shell_text1', 'hisense', 'optus', 'fritolay', 'ferrari', 'toyota_text', 'bulgari', 'mercedesbenz', 'asus', 'esso_text', 'boeing_text', 'airness', 'porsche_text', 'nvidia', 'suzuki', 'northface', 'walmart', 'millerhighlife', 'bellodigital', 'wellsfargo_text', 'quick', 'kodak', 'porsche', 'mastercard', 'nb', 'fedex', 'coach', 'shell_text', 'medibank', 'infiniti_text', 'philadelphia', 'marlboro', 'allianz', 'miraclewhip', 'infiniti', 'nivea', 'android', 'santander_text', 'cheetos', 'hh', 'puma_text', 'jello', 'teslamotors', 'tsingtao', 'williamhill', 'chanel_text', 'tostitos', 'reebok', 'bbc', 'samsung', 'sap', 'gildan', 'heraldsun', 'reebok1', 'ruffles', 'erdinger', 'huawei', 'dhl', 'anz', 'redbull', 'cisco', 'blackmores', 'lacoste', 'panasonic', 'rolex_text', '3m', 'volkswagen', 'tommyhilfiger', 'tissot', 'us_president', 'jcrew', 
'alfaromeo', 'mitsubishi', 'starbucks', 'hermes', 'bfgoodrich', 'facebook', 'carlsberg', 'renault', 'mk', 'apecase', 'bellodigital_text', 'firefox', 'jurlique', 'burgerking_text', 'luxottica', 'adidas1', 'motorola', 'lexus', 'lexus_text', 'nike_text', 'barclays', 'milka', 'amcrest', 'nintendo', 'athalon', 'target_text', 'anz_text', 'costco', 'homedepot_text', 'mtv', 'rittersport', 'converse', 'aluratek_text', 'paulaner', 'americanexpress_text', 'airhawk', 'gucci', 'thomsonreuters', 'yamaha', 'caterpillar', 'accenture', 'allett', 'levis', 'total', 'philips', 'gillette', 'puma', 'carglass', 'blizzardentertainment',)
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1000, 600), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1000, 600),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type='RepeatDataset',
        times=3,
        dataset=dict(
            type=dataset_type,
            classes=classes,
            ann_file=[
                data_root + 'VOC2007/ImageSets/Main/trainval.txt'#,
                #data_root + 'VOC2012/ImageSets/Main/trainval.txt'
            ],
            #img_prefix=[data_root + 'VOC2007/', data_root + 'VOC2012/'],
            img_prefix=[data_root + 'VOC2007/'],
            pipeline=train_pipeline)),
    val=dict(
        type=dataset_type,
        classes=classes,
        ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt',
        img_prefix=data_root + 'VOC2007/',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        classes=classes,
        ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt',
        img_prefix=data_root + 'VOC2007/',
        pipeline=test_pipeline))
evaluation = dict(interval=1, metric='mAP')
lr_config = dict(step=[8, 11])
runner = dict(type='EpochBasedRunner', max_epochs=12)
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])
custom_imports = dict(
    imports=[
        'dw_head'
    ])
checkpoint_config = dict(interval=1)
```

Why did this happen?
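One quick sanity check (a sketch of my own, not from this thread): build the detector from the DCN config and list the deformable modules that were actually created. If the list is non-empty, the config is fine and only the FLOPs accounting is off. This assumes MMDetection 2.x, and `dw_r50_dcn.py` is a hypothetical path for the config above:

```python
# Sketch: verify that the DCN config really builds deformable conv layers.
# Assumes MMDetection 2.x; 'dw_r50_dcn.py' is a hypothetical filename.
from mmcv import Config
from mmdet.models import build_detector

cfg = Config.fromfile('dw_r50_dcn.py')
model = build_detector(cfg.model)

# With stage_with_dcn=(False, True, True, True), conv2 of every bottleneck in
# res3-res5 should be a ModulatedDeformConv2d (DCNv2) instead of a plain Conv2d.
dcn_modules = [name for name, m in model.named_modules()
               if 'DeformConv' in type(m).__name__]
print(len(dcn_modules), 'DCN modules found, e.g.', dcn_modules[:3])
```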
-
And it's really strange that the GFLOPs decrease after adding DCN.
-
The calculation is currently not fully accurate. As you can see, computing the FLOPs of extension operators like DCN is not yet supported, so when DCN is added those modules are skipped and the reported GFLOPs decrease.
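To get a feel for how much work the counter skips, here is a rough back-of-the-envelope sketch (my own estimate, not mmcv's counter) of a single DCNv2 layer's cost: the offset/mask conv plus the main 3x3 conv, ignoring the bilinear sampling. The channel and feature-map sizes below are illustrative assumptions, not values taken from the configs above.

```python
# Rough lower bound on the FLOPs skipped for one DCNv2 layer: counts the
# multiply-accumulates of the offset/mask conv and of the main k x k conv,
# and ignores the bilinear sampling overhead.
def dcnv2_flops(c_in, c_out, h, w, k=3, deform_groups=1):
    # Offset/mask branch: a plain k x k conv producing 3*k*k channels per
    # deform group (2*k*k offsets + k*k modulation masks).
    offset_ch = 3 * k * k * deform_groups
    offset_flops = 2 * c_in * offset_ch * k * k * h * w
    # Main deformable conv: same MAC count as the regular conv it replaces.
    main_flops = 2 * c_in * c_out * k * k * h * w
    return offset_flops + main_flops

# Illustrative numbers: a 256-channel 3x3 conv on a 76x125 stride-8 feature map.
print(dcnv2_flops(256, 256, 76, 125) / 1e9, 'GFLOPs')  # ~12.4 GFLOPs
```

Summed over all the bottlenecks that carry DCN, the uncounted work would easily be large enough to explain the drop in reported GFLOPs.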
-
The calculation result of FCOS from get_flops.py is as below:
The params of backbone + neck + head should add up to 100% of the params. Why does it show only 59.77%?
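For reference, get_flops.py is essentially a thin wrapper around mmcv's ptflops-style counter. A minimal sketch of the equivalent call, assuming MMDetection 2.x (`my_config.py` is a placeholder path):

```python
# Roughly what tools/analysis_tools/get_flops.py does, reduced to the essentials.
from mmcv import Config
from mmcv.cnn import get_model_complexity_info
from mmdet.models import build_detector

cfg = Config.fromfile('my_config.py')  # placeholder path
model = build_detector(cfg.model)
model.eval()
# The counter needs a plain tensor-in/tensor-out forward, so detectors
# expose forward_dummy for exactly this purpose.
model.forward = model.forward_dummy
flops, params = get_model_complexity_info(model, (3, 1280, 800))
print('FLOPs:', flops, 'Params:', params)
```

As noted in the reply above, modules without a registered counting hook (such as DCN) contribute nothing to this printout.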