@@ -42,16 +42,10 @@ def main():
         rgb_read_format = "{:05d}.jpg"
     elif args.dataset == 'hmdb51':
         num_class = 51
-<<<<<<< HEAD
         rgb_read_format = "{:05d}.jpg"
     elif args.dataset == 'kinetics':
         num_class = 400
         rgb_read_format = "{:04d}.jpg"
-=======
-    elif args.dataset == 'kinetics':
-        num_class = 400
-        rgb_read_format = "{:05d}.jpg"
->>>>>>> 1da05d6e7d9dc0b61b5fd230758ee355c9700f8a
     elif args.dataset == 'something':
         num_class = 174
         rgb_read_format = "{:04d}.jpg"
@@ -83,42 +77,11 @@ def main():

     print("pretrained_parts: ", args.pretrained_parts)

-<<<<<<< HEAD
-=======
-    if args.arch == "ECO":
-        new_state_dict = init_ECO(model_dict)
-    if args.arch == "ECOfull":
-        new_state_dict = init_ECOfull(model_dict)
-    elif args.arch == "C3DRes18":
-        new_state_dict = init_C3DRes18(model_dict)
-
-    un_init_dict_keys = [k for k in model_dict.keys() if k not in new_state_dict]
-    print("un_init_dict_keys: ", un_init_dict_keys)
-    print("\n------------------------------------")
-
-    for k in un_init_dict_keys:
-        new_state_dict[k] = torch.DoubleTensor(model_dict[k].size()).zero_()
-        if 'weight' in k:
-            if 'bn' in k:
-                print("{} init as: 1".format(k))
-                constant_(new_state_dict[k], 1)
-            else:
-                print("{} init as: xavier".format(k))
-                xavier_uniform_(new_state_dict[k])
-        elif 'bias' in k:
-            print("{} init as: 0".format(k))
-            constant_(new_state_dict[k], 0)
-
-    print("------------------------------------")
-
-    model.load_state_dict(new_state_dict)
->>>>>>> 1da05d6e7d9dc0b61b5fd230758ee355c9700f8a

     if args.resume:
         if os.path.isfile(args.resume):
             print(("=> loading checkpoint '{}'".format(args.resume)))
             checkpoint = torch.load(args.resume)
-<<<<<<< HEAD
             # if not checkpoint['lr']:
             if "lr" not in checkpoint.keys():
                 args.lr = input("No 'lr' attribute found in resume model, please input the 'lr' manually: ")
@@ -163,24 +126,12 @@ def main():
             model.load_state_dict(new_state_dict)


-=======
-            args.start_epoch = checkpoint['epoch']
-            best_prec1 = checkpoint['best_prec1']
-            model.load_state_dict(checkpoint['state_dict'])
-            print(("=> loaded checkpoint '{}' (epoch {})"
-                  .format(args.resume, checkpoint['epoch'])))
-        else:
-            print(("=> no checkpoint found at '{}'".format(args.resume)))
->>>>>>> 1da05d6e7d9dc0b61b5fd230758ee355c9700f8a

     cudnn.benchmark = True

     # Data loading code
     if args.modality != 'RGBDiff':
-<<<<<<< HEAD
         #input_mean = [0,0,0] #for debugging
-=======
->>>>>>> 1da05d6e7d9dc0b61b5fd230758ee355c9700f8a
         normalize = GroupNormalize(input_mean, input_std)
     else:
         normalize = IdentityTransform()
@@ -215,11 +166,8 @@ def main():
                        GroupCenterCrop(crop_size),
                        Stack(roll=True),
                        ToTorchFormatTensor(div=False),
-<<<<<<< HEAD
                        #Stack(roll=(args.arch == 'C3DRes18') or (args.arch == 'ECO') or (args.arch == 'ECOfull') or (args.arch == 'ECO_2FC')),
                        #ToTorchFormatTensor(div=(args.arch != 'C3DRes18') and (args.arch != 'ECO') and (args.arch != 'ECOfull') and (args.arch != 'ECO_2FC')),
-=======
->>>>>>> 1da05d6e7d9dc0b61b5fd230758ee355c9700f8a
                        normalize,
                    ])),
         batch_size=args.batch_size, shuffle=False,
@@ -244,7 +192,6 @@ def main():
         validate(val_loader, model, criterion, 0)
         return

-<<<<<<< HEAD
     saturate_cnt = 0
     exp_num = 0

@@ -254,10 +201,6 @@ def main():
             saturate_cnt = 0
             print("- Learning rate decreases by a factor of '{}'".format(10**(exp_num)))
         adjust_learning_rate(optimizer, epoch, args.lr_steps, exp_num)
-=======
-    for epoch in range(args.start_epoch, args.epochs):
-        adjust_learning_rate(optimizer, epoch, args.lr_steps)
->>>>>>> 1da05d6e7d9dc0b61b5fd230758ee355c9700f8a

         # train for one epoch
         train(train_loader, model, criterion, optimizer, epoch)
@@ -268,25 +211,19 @@ def main():

         # remember best prec@1 and save checkpoint
         is_best = prec1 > best_prec1
-<<<<<<< HEAD
         if is_best:
             saturate_cnt = 0
         else:
             saturate_cnt = saturate_cnt + 1

         print("- Validation Prec@1 saturates for {} epochs.".format(saturate_cnt))
-=======
->>>>>>> 1da05d6e7d9dc0b61b5fd230758ee355c9700f8a
         best_prec1 = max(prec1, best_prec1)
         save_checkpoint({
             'epoch': epoch + 1,
             'arch': args.arch,
             'state_dict': model.state_dict(),
             'best_prec1': best_prec1,
-<<<<<<< HEAD
             'lr': optimizer.param_groups[-1]['lr'],
-=======
->>>>>>> 1da05d6e7d9dc0b61b5fd230758ee355c9700f8a
         }, is_best)

 def init_ECO(model_dict):
@@ -299,7 +236,6 @@ def init_ECO(model_dict):

     elif args.pretrained_parts == "2D":

-<<<<<<< HEAD
         if args.net_model2D is not None:
             pretrained_dict_2d = torch.load(args.net_model2D)
             print(("=> loading model - 2D net: '{}'".format(args.net_model2D)))
@@ -316,25 +252,18 @@ def init_ECO(model_dict):
                 print("Problem!")
                 print("k: {}, size: {}".format(k, v.shape))

-=======
-        pretrained_dict_2d = torch.utils.model_zoo.load_url(weight_url_2d)
->>>>>>> 1da05d6e7d9dc0b61b5fd230758ee355c9700f8a
         new_state_dict = {"module.base_model."+k: v for k, v in pretrained_dict_2d['state_dict'].items() if "module.base_model."+k in model_dict}

     elif args.pretrained_parts == "3D":

         new_state_dict = {}
-<<<<<<< HEAD
         if args.net_model3D is not None:
             pretrained_dict_3d = torch.load(args.net_model3D)
             print(("=> loading model - 3D net: '{}'".format(args.net_model3D)))
         else:
             pretrained_dict_3d = torch.load("models/C3DResNet18_rgb_16F_kinetics_v1.pth.tar")
             print(("=> loading model - 3D net-url: '{}'".format("models/C3DResNet18_rgb_16F_kinetics_v1.pth.tar")))

-=======
-        pretrained_dict_3d = torch.load("models/C3DResNet18_rgb_16F_kinetics_v1.pth.tar")
->>>>>>> 1da05d6e7d9dc0b61b5fd230758ee355c9700f8a
         for k, v in pretrained_dict_3d['state_dict'].items():
             if (k in model_dict) and (v.size() == model_dict[k].size()):
                 new_state_dict[k] = v
@@ -344,7 +273,6 @@ def init_ECO(model_dict):



     elif args.pretrained_parts == "finetune":
-<<<<<<< HEAD
         print(args.net_modelECO)
         print("88"*40)
         if args.net_modelECO is not None:
@@ -357,18 +285,12 @@ def init_ECO(model_dict):



-=======
-
-        print(("=> loading model '{}'".format("models/eco_lite_rgb_16F_kinetics_v2.pth.tar")))
-        pretrained_dict = torch.load("models/eco_lite_rgb_16F_kinetics_v2.pth.tar")
->>>>>>> 1da05d6e7d9dc0b61b5fd230758ee355c9700f8a
         new_state_dict = {k: v for k, v in pretrained_dict['state_dict'].items() if (k in model_dict) and (v.size() == model_dict[k].size())}
         print("*"*50)
         print("Start finetuning ..")

     elif args.pretrained_parts == "both":

-<<<<<<< HEAD
         # Load the 2D net pretrained model
         if args.net_model2D is not None:
             pretrained_dict_2d = torch.load(args.net_model2D)
@@ -389,21 +311,12 @@ def init_ECO(model_dict):

         new_state_dict = {"module.base_model."+k: v for k, v in pretrained_dict_2d['state_dict'].items() if "module.base_model."+k in model_dict}

-=======
-        pretrained_dict_2d = torch.utils.model_zoo.load_url(weight_url_2d)
-        new_state_dict = {"module.base_model."+k: v for k, v in pretrained_dict_2d['state_dict'].items() if "module.base_model."+k in model_dict}
-        pretrained_dict_3d = torch.load("models/C3DResNet18_rgb_16F_kinetics_v1.pth.tar")
->>>>>>> 1da05d6e7d9dc0b61b5fd230758ee355c9700f8a
         for k, v in pretrained_dict_3d['state_dict'].items():
             if (k in model_dict) and (v.size() == model_dict[k].size()):
                 new_state_dict[k] = v

         res3a_2_weight_chunk = torch.chunk(pretrained_dict_3d["state_dict"]["module.base_model.res3a_2.weight"], 4, 1)
         new_state_dict["module.base_model.res3a_2.weight"] = torch.cat((res3a_2_weight_chunk[0], res3a_2_weight_chunk[1], res3a_2_weight_chunk[2]), 1)
-<<<<<<< HEAD
-=======
-
->>>>>>> 1da05d6e7d9dc0b61b5fd230758ee355c9700f8a
     return new_state_dict

 def init_ECOfull(model_dict):
@@ -431,7 +344,6 @@ def init_ECOfull(model_dict):
         new_state_dict["module.base_model.res3a_2.weight"] = torch.cat((res3a_2_weight_chunk[0], res3a_2_weight_chunk[1], res3a_2_weight_chunk[2]), 1)


-<<<<<<< HEAD

     elif args.pretrained_parts == "finetune":
         print(args.net_modelECO)
@@ -443,19 +355,12 @@ def init_ECOfull(model_dict):
             pretrained_dict = torch.load("models/eco_lite_rgb_16F_kinetics_v2.pth.tar")
             print(("=> loading model-finetune-url: '{}'".format("models/eco_lite_rgb_16F_kinetics_v2.pth.tar")))

-=======
-    elif args.pretrained_parts == "finetune":
-
-        print(("=> loading model '{}'".format("models/eco_lite_rgb_16F_kinetics_v2.pth.tar")))
-        pretrained_dict = torch.load("models/eco_lite_rgb_16F_kinetics_v2.pth.tar")
->>>>>>> 1da05d6e7d9dc0b61b5fd230758ee355c9700f8a
         new_state_dict = {k: v for k, v in pretrained_dict['state_dict'].items() if (k in model_dict) and (v.size() == model_dict[k].size())}
         print("*"*50)
         print("Start finetuning ..")

     elif args.pretrained_parts == "both":

-<<<<<<< HEAD
         # Load the 2D net pretrained model
         if args.net_model2D is not None:
             pretrained_dict_2d = torch.load(args.net_model2D)
@@ -476,22 +381,12 @@ def init_ECOfull(model_dict):
             print(("=> loading model - 3D net-url: '{}'".format("models/C3DResNet18_rgb_16F_kinetics_v1.pth.tar")))


-=======
-        pretrained_dict_2d = torch.utils.model_zoo.load_url(weight_url_2d)
-        new_state_dict = {"module.base_model."+k: v for k, v in pretrained_dict_2d['state_dict'].items() if "module.base_model."+k in model_dict}
-        pretrained_dict_3d = torch.load("models/C3DResNet18_rgb_16F_kinetics_v1.pth.tar")
->>>>>>> 1da05d6e7d9dc0b61b5fd230758ee355c9700f8a
         for k, v in pretrained_dict_3d['state_dict'].items():
             if (k in model_dict) and (v.size() == model_dict[k].size()):
                 new_state_dict[k] = v

-<<<<<<< HEAD
         #res3a_2_weight_chunk = torch.chunk(pretrained_dict_3d["state_dict"]["module.base_model.res3a_2.weight"], 4, 1)
         #new_state_dict["module.base_model.res3a_2.weight"] = torch.cat((res3a_2_weight_chunk[0], res3a_2_weight_chunk[1], res3a_2_weight_chunk[2]), 1)
-=======
-        res3a_2_weight_chunk = torch.chunk(pretrained_dict_3d["state_dict"]["module.base_model.res3a_2.weight"], 4, 1)
-        new_state_dict["module.base_model.res3a_2.weight"] = torch.cat((res3a_2_weight_chunk[0], res3a_2_weight_chunk[1], res3a_2_weight_chunk[2]), 1)
->>>>>>> 1da05d6e7d9dc0b61b5fd230758ee355c9700f8a

     return new_state_dict

@@ -526,18 +421,13 @@ def train(train_loader, model, criterion, optimizer, epoch):
     model.train()

     end = time.time()
-<<<<<<< HEAD

     loss_summ = 0
     localtime = time.localtime()
     end_time = time.strftime("%Y/%m/%d-%H:%M:%S", localtime)
     for i, (input, target) in enumerate(train_loader):
         # discard final batch

-=======
-    for i, (input, target) in enumerate(train_loader):
-        # discard final batch
->>>>>>> 1da05d6e7d9dc0b61b5fd230758ee355c9700f8a
         if i == len(train_loader)-1:
             break
         # measure data loading time
@@ -549,7 +439,6 @@ def train(train_loader, model, criterion, optimizer, epoch):
         target_var = target

         # compute output, output size: [batch_size, num_class]
-<<<<<<< HEAD

         output = model(input_var)

@@ -596,53 +485,6 @@ def train(train_loader, model, criterion, optimizer, epoch):
             end_time = time.strftime("%Y/%m/%d-%H:%M:%S", localtime)


-=======
-        output = model(input_var)
-
-        loss = criterion(output, target_var)
-
-        # measure accuracy and record loss
-        prec1, prec5 = accuracy(output.data, target, topk=(1,5))
-        losses.update(loss.item(), input.size(0))
-        top1.update(prec1.item(), input.size(0))
-        top5.update(prec5.item(), input.size(0))
-
-
-        # compute gradient and do SGD step
-        loss.backward()
-
-        if i % args.iter_size == 0:
-            # scale down gradients when iter size is functioning
-            if args.iter_size != 1:
-                for g in optimizer.param_groups:
-                    for p in g['params']:
-                        p.grad /= args.iter_size
-
-            if args.clip_gradient is not None:
-                total_norm = clip_grad_norm_(model.parameters(), args.clip_gradient)
-                if total_norm > args.clip_gradient:
-                    print("clipping gradient: {} with coef {}".format(total_norm, args.clip_gradient / total_norm))
-            else:
-                total_norm = 0
-
-            optimizer.step()
-            optimizer.zero_grad()
-
-
-        # measure elapsed time
-        batch_time.update(time.time() - end)
-        end = time.time()
-
-        if i % args.print_freq == 0:
-            print(('Epoch: [{0}][{1}/{2}], lr: {lr:.5f}\t'
-                   'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
-                   'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
-                   'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
-                   'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
-                   'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
-                    epoch, i, len(train_loader), batch_time=batch_time,
-                    data_time=data_time, loss=losses, top1=top1, top5=top5, lr=optimizer.param_groups[-1]['lr'])))
->>>>>>> 1da05d6e7d9dc0b61b5fd230758ee355c9700f8a


 def validate(val_loader, model, criterion, iter, logger=None):
@@ -722,16 +564,10 @@ def update(self, val, n=1):
         self.avg = self.sum / self.count


-<<<<<<< HEAD
 def adjust_learning_rate(optimizer, epoch, lr_steps, exp_num):
     """Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
     # decay = 0.1 ** (sum(epoch >= np.array(lr_steps)))
     decay = 0.1 ** (exp_num)
-=======
-def adjust_learning_rate(optimizer, epoch, lr_steps):
-    """Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
-    decay = 0.1 ** (sum(epoch >= np.array(lr_steps)))
->>>>>>> 1da05d6e7d9dc0b61b5fd230758ee355c9700f8a
     lr = args.lr * decay
     decay = args.weight_decay
     for param_group in optimizer.param_groups:
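Note on the learning-rate schedule retained from HEAD: instead of decaying at fixed epoch steps, the training loop counts how many consecutive epochs validation Prec@1 fails to improve (saturate_cnt); once that counter reaches a saturation threshold, exp_num is incremented and adjust_learning_rate rescales the base LR by 0.1 ** exp_num. Below is a minimal runnable sketch of that mechanism only; the num_saturate threshold, base LR, weight decay, and the toy model/optimizer are illustrative assumptions, not values taken from this patch.

```python
import torch

def adjust_learning_rate(optimizer, base_lr, weight_decay, exp_num):
    # Decay the base LR by a factor of 10 for every saturation event recorded so far.
    lr = base_lr * (0.1 ** exp_num)
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
        param_group['weight_decay'] = weight_decay

model = torch.nn.Linear(8, 2)  # stand-in for the ECO model
optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9)

best_prec1, saturate_cnt, exp_num = 0.0, 0, 0
num_saturate = 5  # assumed threshold (args.num_saturate in the full script)

for epoch in range(30):
    if saturate_cnt == num_saturate:  # Prec@1 flat for num_saturate epochs in a row
        exp_num += 1
        saturate_cnt = 0
        print("- Learning rate decreases by a factor of '{}'".format(10 ** exp_num))
    adjust_learning_rate(optimizer, base_lr=0.001, weight_decay=5e-4, exp_num=exp_num)

    prec1 = min(80.0, 60.0 + epoch)  # stand-in for validate(val_loader, model, criterion, ...)
    if prec1 > best_prec1:
        saturate_cnt = 0
    else:
        saturate_cnt += 1
    best_prec1 = max(prec1, best_prec1)
```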