From d995bd7cf5a6f5f60be3967cc9f55393e49d55a2 Mon Sep 17 00:00:00 2001
From: PhiColTan
Date: Thu, 16 Aug 2018 22:25:04 -0300
Subject: [PATCH] Change print statements to be compatible with Python3

---
 lib/datasets/coco.py               | 30 +++++++-------
 lib/datasets/pascal_voc.py         | 24 +++++------
 lib/datasets/tools/mcg_munge.py    |  2 +-
 lib/datasets/voc_eval.py           |  6 +--
 lib/fast_rcnn/test.py              |  6 +--
 lib/fast_rcnn/train.py             | 28 ++++++-------
 lib/pycocotools/coco.py            | 20 ++++-----
 lib/pycocotools/cocoeval.py        | 14 +++----
 lib/roi_data_layer/layer.py        |  6 +--
 lib/roi_data_layer/minibatch.py    |  2 +-
 lib/roi_data_layer/roidb.py        | 16 ++++----
 lib/rpn/anchor_target_layer.py     | 52 +++++++++++------------
 lib/rpn/generate.py                |  4 +-
 lib/rpn/generate_anchors.py        |  4 +-
 lib/rpn/proposal_layer.py          | 12 +++---
 lib/rpn/proposal_target_layer.py   | 10 ++---
 tools/compress_net.py              |  4 +-
 tools/demo.py                      |  8 ++--
 tools/eval_recall.py               | 26 ++++++------
 tools/reval.py                     |  4 +-
 tools/rpn_generate.py              |  2 +-
 tools/train_faster_rcnn_alt_opt.py | 66 +++++++++++++++---------------
 tools/train_net.py                 |  8 ++--
 tools/train_svms.py                | 12 +++---
 24 files changed, 183 insertions(+), 183 deletions(-)

diff --git a/lib/datasets/coco.py b/lib/datasets/coco.py
index bfe8ff3d7..29a326db4 100644
--- a/lib/datasets/coco.py
+++ b/lib/datasets/coco.py
@@ -141,8 +141,8 @@ def _roidb_from_proposals(self, method):
         if osp.exists(cache_file):
             with open(cache_file, 'rb') as fid:
                 roidb = cPickle.load(fid)
-            print '{:s} {:s} roidb loaded from {:s}'.format(self.name, method,
-                                                            cache_file)
+            print('{:s} {:s} roidb loaded from {:s}'.format(self.name, method,
+                                                            cache_file))
             return roidb
 
         if self._image_set in self._gt_splits:
@@ -155,7 +155,7 @@ def _roidb_from_proposals(self, method):
         roidb = self._load_proposals(method, None)
         with open(cache_file, 'wb') as fid:
             cPickle.dump(roidb, fid, cPickle.HIGHEST_PROTOCOL)
-        print 'wrote {:s} roidb to {:s}'.format(method, cache_file)
+        print('wrote {:s} roidb to {:s}'.format(method, cache_file))
         return roidb
 
     def _load_proposals(self, method, gt_roidb):
@@ -177,10 +177,10 @@ def _load_proposals(self, method, gt_roidb):
                          'edge_boxes_70']
         assert method in valid_methods
 
-        print 'Loading {} boxes'.format(method)
+        print('Loading {} boxes'.format(method))
         for i, index in enumerate(self._image_index):
             if i % 1000 == 0:
-                print '{:d} / {:d}'.format(i + 1, len(self._image_index))
+                print('{:d} / {:d}'.format(i + 1, len(self._image_index)))
 
             box_file = osp.join(
                 cfg.DATA_DIR, 'coco_proposals', method, 'mat',
@@ -214,7 +214,7 @@ def gt_roidb(self):
         if osp.exists(cache_file):
             with open(cache_file, 'rb') as fid:
                 roidb = cPickle.load(fid)
-            print '{} gt roidb loaded from {}'.format(self.name, cache_file)
+            print('{} gt roidb loaded from {}'.format(self.name, cache_file))
             return roidb
 
         gt_roidb = [self._load_coco_annotation(index)
@@ -222,7 +222,7 @@ def gt_roidb(self):
 
         with open(cache_file, 'wb') as fid:
             cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)
-        print 'wrote gt roidb to {}'.format(cache_file)
+        print('wrote gt roidb to {}'.format(cache_file))
         return gt_roidb
 
     def _load_coco_annotation(self, index):
@@ -306,18 +306,18 @@ def _get_thr_ind(coco_eval, thr):
         precision = \
             coco_eval.eval['precision'][ind_lo:(ind_hi + 1), :, :, 0, 2]
         ap_default = np.mean(precision[precision > -1])
-        print ('~~~~ Mean and per-category AP @ IoU=[{:.2f},{:.2f}] '
-               '~~~~').format(IoU_lo_thresh, IoU_hi_thresh)
-        print '{:.1f}'.format(100 * ap_default)
+        print('~~~~ Mean and per-category AP @ IoU=[{:.2f},{:.2f}] '
+              '~~~~'.format(IoU_lo_thresh, IoU_hi_thresh))
+        print('{:.1f}'.format(100 * ap_default))
         for
cls_ind, cls in enumerate(self.classes): if cls == '__background__': continue # minus 1 because of __background__ precision = coco_eval.eval['precision'][ind_lo:(ind_hi + 1), :, cls_ind - 1, 0, 2] ap = np.mean(precision[precision > -1]) - print '{:.1f}'.format(100 * ap) + print('{:.1f}'.format(100 * ap)) - print '~~~~ Summary metrics ~~~~' + print('~~~~ Summary metrics ~~~~') coco_eval.summarize() def _do_detection_eval(self, res_file, output_dir): @@ -331,7 +331,7 @@ def _do_detection_eval(self, res_file, output_dir): eval_file = osp.join(output_dir, 'detection_results.pkl') with open(eval_file, 'wb') as fid: cPickle.dump(coco_eval, fid, cPickle.HIGHEST_PROTOCOL) - print 'Wrote COCO eval results to: {}'.format(eval_file) + print('Wrote COCO eval results to: {}'.format(eval_file)) def _coco_results_one_category(self, boxes, cat_id): results = [] @@ -360,12 +360,12 @@ def _write_coco_results_file(self, all_boxes, res_file): for cls_ind, cls in enumerate(self.classes): if cls == '__background__': continue - print 'Collecting {} results ({:d}/{:d})'.format(cls, cls_ind, - self.num_classes - 1) + print('Collecting {} results ({:d}/{:d})'.format(cls, cls_ind, + self.num_classes - 1)) coco_cat_id = self._class_to_coco_cat_id[cls] results.extend(self._coco_results_one_category(all_boxes[cls_ind], coco_cat_id)) - print 'Writing results json to {}'.format(res_file) + print('Writing results json to {}'.format(res_file)) with open(res_file, 'w') as fid: json.dump(results, fid) diff --git a/lib/datasets/pascal_voc.py b/lib/datasets/pascal_voc.py index b55f2f6b2..3d8a81ef3 100644 --- a/lib/datasets/pascal_voc.py +++ b/lib/datasets/pascal_voc.py @@ -100,14 +100,14 @@ def gt_roidb(self): if os.path.exists(cache_file): with open(cache_file, 'rb') as fid: roidb = cPickle.load(fid) - print '{} gt roidb loaded from {}'.format(self.name, cache_file) + print('{} gt roidb loaded from {}'.format(self.name, cache_file)) return roidb gt_roidb = [self._load_pascal_annotation(index) for index in self.image_index] with open(cache_file, 'wb') as fid: cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL) - print 'wrote gt roidb to {}'.format(cache_file) + print('wrote gt roidb to {}'.format(cache_file)) return gt_roidb @@ -124,7 +124,7 @@ def selective_search_roidb(self): if os.path.exists(cache_file): with open(cache_file, 'rb') as fid: roidb = cPickle.load(fid) - print '{} ss roidb loaded from {}'.format(self.name, cache_file) + print('{} ss roidb loaded from {}'.format(self.name, cache_file)) return roidb if int(self._year) == 2007 or self._image_set != 'test': @@ -135,7 +135,7 @@ def selective_search_roidb(self): roidb = self._load_selective_search_roidb(None) with open(cache_file, 'wb') as fid: cPickle.dump(roidb, fid, cPickle.HIGHEST_PROTOCOL) - print 'wrote ss roidb to {}'.format(cache_file) + print('wrote ss roidb to {}'.format(cache_file)) return roidb @@ -151,7 +151,7 @@ def rpn_roidb(self): def _load_rpn_roidb(self, gt_roidb): filename = self.config['rpn_file'] - print 'loading {}'.format(filename) + print('loading {}'.format(filename)) assert os.path.exists(filename), \ 'rpn data not found at: {}'.format(filename) with open(filename, 'rb') as f: @@ -190,8 +190,8 @@ def _load_pascal_annotation(self, index): non_diff_objs = [ obj for obj in objs if int(obj.find('difficult').text) == 0] # if len(non_diff_objs) != len(objs): - # print 'Removed {} difficult objects'.format( - # len(objs) - len(non_diff_objs)) + # print('Removed {} difficult objects'.format( + # len(objs) - len(non_diff_objs))) objs = 
non_diff_objs num_objs = len(objs) @@ -243,7 +243,7 @@ def _write_voc_results_file(self, all_boxes): for cls_ind, cls in enumerate(self.classes): if cls == '__background__': continue - print 'Writing {} VOC results file'.format(cls) + print('Writing {} VOC results file'.format(cls)) filename = self._get_voc_results_file_template().format(cls) with open(filename, 'wt') as f: for im_ind, index in enumerate(self.image_index): @@ -273,7 +273,7 @@ def _do_python_eval(self, output_dir = 'output'): aps = [] # The PASCAL VOC metric changed in 2010 use_07_metric = True if int(self._year) < 2010 else False - print 'VOC07 metric? ' + ('Yes' if use_07_metric else 'No') + print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No')) if not os.path.isdir(output_dir): os.mkdir(output_dir) for i, cls in enumerate(self._classes): @@ -303,9 +303,9 @@ def _do_python_eval(self, output_dir = 'output'): print('--------------------------------------------------------------') def _do_matlab_eval(self, output_dir='output'): - print '-----------------------------------------------------' - print 'Computing results with the official MATLAB eval code.' - print '-----------------------------------------------------' + print('-----------------------------------------------------') + print('Computing results with the official MATLAB eval code.') + print('-----------------------------------------------------') path = os.path.join(cfg.ROOT_DIR, 'lib', 'datasets', 'VOCdevkit-matlab-wrapper') cmd = 'cd {} && '.format(path) diff --git a/lib/datasets/tools/mcg_munge.py b/lib/datasets/tools/mcg_munge.py index 1392aa308..22d5bdccd 100644 --- a/lib/datasets/tools/mcg_munge.py +++ b/lib/datasets/tools/mcg_munge.py @@ -28,7 +28,7 @@ def munge(src_dir): os.makedirs(dst_dir) src = os.path.join(src_dir, fn) dst = os.path.join(dst_dir, fn) - print 'MV: {} -> {}'.format(src, dst) + print('MV: {} -> {}'.format(src, dst)) os.rename(src, dst) if __name__ == '__main__': diff --git a/lib/datasets/voc_eval.py b/lib/datasets/voc_eval.py index 8d0a83076..b5aff8012 100644 --- a/lib/datasets/voc_eval.py +++ b/lib/datasets/voc_eval.py @@ -108,10 +108,10 @@ def voc_eval(detpath, for i, imagename in enumerate(imagenames): recs[imagename] = parse_rec(annopath.format(imagename)) if i % 100 == 0: - print 'Reading annotation for {:d}/{:d}'.format( - i + 1, len(imagenames)) + print('Reading annotation for {:d}/{:d}'.format( + i + 1, len(imagenames))) # save - print 'Saving cached annotations to {:s}'.format(cachefile) + print('Saving cached annotations to {:s}'.format(cachefile)) with open(cachefile, 'w') as f: cPickle.dump(recs, f) else: diff --git a/lib/fast_rcnn/test.py b/lib/fast_rcnn/test.py index f889d0977..a68b9c046 100644 --- a/lib/fast_rcnn/test.py +++ b/lib/fast_rcnn/test.py @@ -283,13 +283,13 @@ def test_net(net, imdb, max_per_image=100, thresh=0.05, vis=False): all_boxes[j][i] = all_boxes[j][i][keep, :] _t['misc'].toc() - print 'im_detect: {:d}/{:d} {:.3f}s {:.3f}s' \ + print('im_detect: {:d}/{:d} {:.3f}s {:.3f}s' \ .format(i + 1, num_images, _t['im_detect'].average_time, - _t['misc'].average_time) + _t['misc'].average_time)) det_file = os.path.join(output_dir, 'detections.pkl') with open(det_file, 'wb') as f: cPickle.dump(all_boxes, f, cPickle.HIGHEST_PROTOCOL) - print 'Evaluating detections' + print('Evaluating detections') imdb.evaluate_detections(all_boxes, output_dir) diff --git a/lib/fast_rcnn/train.py b/lib/fast_rcnn/train.py index 05bd594ff..a07903408 100644 --- a/lib/fast_rcnn/train.py +++ b/lib/fast_rcnn/train.py @@ -35,15 +35,15 @@ 
def __init__(self, solver_prototxt, roidb, output_dir, assert cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED if cfg.TRAIN.BBOX_REG: - print 'Computing bounding-box regression targets...' + print('Computing bounding-box regression targets...') self.bbox_means, self.bbox_stds = \ rdl_roidb.add_bbox_regression_targets(roidb) - print 'done' + print('done') self.solver = caffe.SGDSolver(solver_prototxt) if pretrained_model is not None: - print ('Loading pretrained model ' - 'weights from {:s}').format(pretrained_model) + print('Loading pretrained model ' + 'weights from {:s}'.format(pretrained_model)) self.solver.net.copy_from(pretrained_model) self.solver_param = caffe_pb2.SolverParameter() @@ -82,7 +82,7 @@ def snapshot(self): filename = os.path.join(self.output_dir, filename) net.save(str(filename)) - print 'Wrote snapshot to: {:s}'.format(filename) + print('Wrote snapshot to: {:s}'.format(filename)) if scale_bbox_params: # restore net to original state @@ -101,7 +101,7 @@ def train_model(self, max_iters): self.solver.step(1) timer.toc() if self.solver.iter % (10 * self.solver_param.display) == 0: - print 'speed: {:.3f}s / iter'.format(timer.average_time) + print('speed: {:.3f}s / iter'.format(timer.average_time)) if self.solver.iter % cfg.TRAIN.SNAPSHOT_ITERS == 0: last_snapshot_iter = self.solver.iter @@ -114,13 +114,13 @@ def train_model(self, max_iters): def get_training_roidb(imdb): """Returns a roidb (Region of Interest database) for use in training.""" if cfg.TRAIN.USE_FLIPPED: - print 'Appending horizontally-flipped training examples...' + print('Appending horizontally-flipped training examples...') imdb.append_flipped_images() - print 'done' + print('done') - print 'Preparing training data...' + print('Preparing training data...') rdl_roidb.prepare_roidb(imdb) - print 'done' + print('done') return imdb.roidb @@ -144,8 +144,8 @@ def is_valid(entry): num = len(roidb) filtered_roidb = [entry for entry in roidb if is_valid(entry)] num_after = len(filtered_roidb) - print 'Filtered {} roidb entries: {} -> {}'.format(num - num_after, - num, num_after) + print('Filtered {} roidb entries: {} -> {}'.format(num - num_after, + num, num_after)) return filtered_roidb def train_net(solver_prototxt, roidb, output_dir, @@ -156,7 +156,7 @@ def train_net(solver_prototxt, roidb, output_dir, sw = SolverWrapper(solver_prototxt, roidb, output_dir, pretrained_model=pretrained_model) - print 'Solving...' + print('Solving...') model_paths = sw.train_model(max_iters) - print 'done solving' + print('done solving') return model_paths diff --git a/lib/pycocotools/coco.py b/lib/pycocotools/coco.py index 5d9f6b826..ab5fba7ed 100644 --- a/lib/pycocotools/coco.py +++ b/lib/pycocotools/coco.py @@ -74,16 +74,16 @@ def __init__(self, annotation_file=None): self.imgs = {} self.cats = {} if not annotation_file == None: - print 'loading annotations into memory...' + print('loading annotations into memory...') tic = time.time() dataset = json.load(open(annotation_file, 'r')) - print 'Done (t=%0.2fs)'%(time.time()- tic) + print('Done (t=%0.2fs)'%(time.time()- tic)) self.dataset = dataset self.createIndex() def createIndex(self): # create index - print 'creating index...' + print('creating index...') anns = {} imgToAnns = {} catToImgs = {} @@ -110,7 +110,7 @@ def createIndex(self): for ann in self.dataset['annotations']: catToImgs[ann['category_id']] += [ann['image_id']] - print 'index created!' 
+ print('index created!') # create class members self.anns = anns @@ -125,7 +125,7 @@ def info(self): :return: """ for key, value in self.dataset['info'].items(): - print '%s: %s'%(key, value) + print('%s: %s'%(key, value)) def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None): """ @@ -276,7 +276,7 @@ def showAnns(self, anns): ax.add_collection(p) elif datasetType == 'captions': for ann in anns: - print ann['caption'] + print(ann['caption']) def loadRes(self, resFile): """ @@ -289,7 +289,7 @@ def loadRes(self, resFile): # res.dataset['info'] = copy.deepcopy(self.dataset['info']) # res.dataset['licenses'] = copy.deepcopy(self.dataset['licenses']) - print 'Loading and preparing results... ' + print('Loading and preparing results... ') tic = time.time() anns = json.load(open(resFile)) assert type(anns) == list, 'results in not an array of objects' @@ -320,7 +320,7 @@ def loadRes(self, resFile): ann['bbox'] = mask.toBbox([ann['segmentation']])[0] ann['id'] = id+1 ann['iscrowd'] = 0 - print 'DONE (t=%0.2fs)'%(time.time()- tic) + print('DONE (t=%0.2fs)'%(time.time()- tic)) res.dataset['annotations'] = anns res.createIndex() @@ -334,7 +334,7 @@ def download( self, tarDir = None, imgIds = [] ): :return: ''' if tarDir is None: - print 'Please specify target directory' + print('Please specify target directory') return -1 if len(imgIds) == 0: imgs = self.imgs.values() @@ -348,4 +348,4 @@ def download( self, tarDir = None, imgIds = [] ): fname = os.path.join(tarDir, img['file_name']) if not os.path.exists(fname): urllib.urlretrieve(img['coco_url'], fname) - print 'downloaded %d/%d images (t=%.1fs)'%(i, N, time.time()- tic) + print('downloaded %d/%d images (t=%.1fs)'%(i, N, time.time()- tic)) diff --git a/lib/pycocotools/cocoeval.py b/lib/pycocotools/cocoeval.py index f389eb0f2..03c8cfe10 100644 --- a/lib/pycocotools/cocoeval.py +++ b/lib/pycocotools/cocoeval.py @@ -91,7 +91,7 @@ def _toMask(objs, coco): t = coco.imgs[obj['image_id']] if type(obj['segmentation']) == list: if type(obj['segmentation'][0]) == dict: - print 'debug' + print('debug') obj['segmentation'] = mask.frPyObjects(obj['segmentation'],t['height'],t['width']) if len(obj['segmentation']) == 1: obj['segmentation'] = obj['segmentation'][0] @@ -132,7 +132,7 @@ def evaluate(self): :return: None ''' tic = time.time() - print 'Running per image evaluation... ' + print('Running per image evaluation... ') p = self.params p.imgIds = list(np.unique(p.imgIds)) if p.useCats: @@ -158,7 +158,7 @@ def evaluate(self): ] self._paramsEval = copy.deepcopy(self.params) toc = time.time() - print 'DONE (t=%0.2fs).'%(toc-tic) + print('DONE (t=%0.2fs).'%(toc-tic)) def computeIoU(self, imgId, catId): p = self.params @@ -277,10 +277,10 @@ def accumulate(self, p = None): :param p: input params for evaluation :return: None ''' - print 'Accumulating evaluation results... ' + print('Accumulating evaluation results... 
') tic = time.time() if not self.evalImgs: - print 'Please run evaluate() first' + print('Please run evaluate() first') # allows input customized parameters if p is None: p = self.params @@ -371,7 +371,7 @@ def accumulate(self, p = None): 'recall': recall, } toc = time.time() - print 'DONE (t=%0.2fs).'%( toc-tic ) + print('DONE (t=%0.2fs).'%( toc-tic )) def summarize(self): ''' @@ -406,7 +406,7 @@ def _summarize( ap=1, iouThr=None, areaRng='all', maxDets=100 ): mean_s = -1 else: mean_s = np.mean(s[s>-1]) - print iStr.format(titleStr, typeStr, iouStr, areaStr, maxDetsStr, '%.3f'%(float(mean_s))) + print(iStr.format(titleStr, typeStr, iouStr, areaStr, maxDetsStr, '%.3f'%(float(mean_s)))) return mean_s if not self.eval: diff --git a/lib/roi_data_layer/layer.py b/lib/roi_data_layer/layer.py index 9f145fea8..abfd8e3f4 100644 --- a/lib/roi_data_layer/layer.py +++ b/lib/roi_data_layer/layer.py @@ -74,7 +74,7 @@ def set_roidb(self, roidb): self._prefetch_process.start() # Terminate the child process when the parent exists def cleanup(): - print 'Terminating BlobFetcher' + print('Terminating BlobFetcher') self._prefetch_process.terminate() self._prefetch_process.join() import atexit @@ -136,7 +136,7 @@ def setup(self, bottom, top): self._name_to_top_map['bbox_outside_weights'] = idx idx += 1 - print 'RoiDataLayer: name_to_top:', self._name_to_top_map + print('RoiDataLayer: name_to_top:', self._name_to_top_map) assert len(top) == len(self._name_to_top_map) def forward(self, bottom, top): @@ -188,7 +188,7 @@ def _get_next_minibatch_inds(self): return db_inds def run(self): - print 'BlobFetcher started' + print('BlobFetcher started') while True: db_inds = self._get_next_minibatch_inds() minibatch_db = [self._roidb[i] for i in db_inds] diff --git a/lib/roi_data_layer/minibatch.py b/lib/roi_data_layer/minibatch.py index f4535b022..5f60622ca 100644 --- a/lib/roi_data_layer/minibatch.py +++ b/lib/roi_data_layer/minibatch.py @@ -190,7 +190,7 @@ def _vis_minibatch(im_blob, rois_blob, labels_blob, overlaps): im = im.astype(np.uint8) cls = labels_blob[i] plt.imshow(im) - print 'class: ', cls, ' overlap: ', overlaps[i] + print('class: ', cls, ' overlap: ', overlaps[i]) plt.gca().add_patch( plt.Rectangle((roi[0], roi[1]), roi[2] - roi[0], roi[3] - roi[1], fill=False, diff --git a/lib/roi_data_layer/roidb.py b/lib/roi_data_layer/roidb.py index 97a6a7612..3f8ea3479 100644 --- a/lib/roi_data_layer/roidb.py +++ b/lib/roi_data_layer/roidb.py @@ -83,16 +83,16 @@ def add_bbox_regression_targets(roidb): means = sums / class_counts stds = np.sqrt(squared_sums / class_counts - means ** 2) - print 'bbox target means:' - print means - print means[1:, :].mean(axis=0) # ignore bg class - print 'bbox target stdevs:' - print stds - print stds[1:, :].mean(axis=0) # ignore bg class + print('bbox target means:') + print(means) + print(means[1:, :].mean(axis=0)) # ignore bg class + print('bbox target stdevs:') + print(stds) + print(stds[1:, :].mean(axis=0)) # ignore bg class # Normalize targets if cfg.TRAIN.BBOX_NORMALIZE_TARGETS: - print "Normalizing targets" + print("Normalizing targets") for im_i in xrange(num_images): targets = roidb[im_i]['bbox_targets'] for cls in xrange(1, num_classes): @@ -100,7 +100,7 @@ def add_bbox_regression_targets(roidb): roidb[im_i]['bbox_targets'][cls_inds, 1:] -= means[cls, :] roidb[im_i]['bbox_targets'][cls_inds, 1:] /= stds[cls, :] else: - print "NOT normalizing targets" + print("NOT normalizing targets") # These values will be needed for making predictions # (the predicts will need to be 
unnormalized and uncentered) diff --git a/lib/rpn/anchor_target_layer.py b/lib/rpn/anchor_target_layer.py index 4563df1d2..764130e08 100644 --- a/lib/rpn/anchor_target_layer.py +++ b/lib/rpn/anchor_target_layer.py @@ -31,13 +31,13 @@ def setup(self, bottom, top): self._feat_stride = layer_params['feat_stride'] if DEBUG: - print 'anchors:' - print self._anchors - print 'anchor shapes:' - print np.hstack(( + print('anchors:') + print(self._anchors) + print('anchor shapes:') + print(np.hstack(( self._anchors[:, 2::4] - self._anchors[:, 0::4], self._anchors[:, 3::4] - self._anchors[:, 1::4], - )) + ))) self._counts = cfg.EPS self._sums = np.zeros((1, 4)) self._squared_sums = np.zeros((1, 4)) @@ -50,7 +50,7 @@ def setup(self, bottom, top): height, width = bottom[0].data.shape[-2:] if DEBUG: - print 'AnchorTargetLayer: height', height, 'width', width + print('AnchorTargetLayer: height', height, 'width', width) A = self._num_anchors # labels @@ -82,12 +82,12 @@ def forward(self, bottom, top): im_info = bottom[2].data[0, :] if DEBUG: - print '' - print 'im_size: ({}, {})'.format(im_info[0], im_info[1]) - print 'scale: {}'.format(im_info[2]) - print 'height, width: ({}, {})'.format(height, width) - print 'rpn: gt_boxes.shape', gt_boxes.shape - print 'rpn: gt_boxes', gt_boxes + print('') + print('im_size: ({}, {})'.format(im_info[0], im_info[1])) + print('scale: {}'.format(im_info[2])) + print('height, width: ({}, {})'.format(height, width)) + print('rpn: gt_boxes.shape', gt_boxes.shape) + print('rpn: gt_boxes', gt_boxes) # 1. Generate proposals from bbox deltas and shifted anchors shift_x = np.arange(0, width) * self._feat_stride @@ -115,13 +115,13 @@ def forward(self, bottom, top): )[0] if DEBUG: - print 'total_anchors', total_anchors - print 'inds_inside', len(inds_inside) + print('total_anchors', total_anchors) + print('inds_inside', len(inds_inside)) # keep only inside anchors anchors = all_anchors[inds_inside, :] if DEBUG: - print 'anchors.shape', anchors.shape + print('anchors.shape', anchors.shape) # label: 1 is positive, 0 is negative, -1 is dont care labels = np.empty((len(inds_inside), ), dtype=np.float32) @@ -168,8 +168,8 @@ def forward(self, bottom, top): disable_inds = npr.choice( bg_inds, size=(len(bg_inds) - num_bg), replace=False) labels[disable_inds] = -1 - #print "was %s inds, disabling %s, now %s inds" % ( - #len(bg_inds), len(disable_inds), np.sum(labels == 0)) + #print("was %s inds, disabling %s, now %s inds" % ( + #len(bg_inds), len(disable_inds), np.sum(labels == 0))) bbox_targets = np.zeros((len(inds_inside), 4), dtype=np.float32) bbox_targets = _compute_targets(anchors, gt_boxes[argmax_overlaps, :]) @@ -199,10 +199,10 @@ def forward(self, bottom, top): self._counts += np.sum(labels == 1) means = self._sums / self._counts stds = np.sqrt(self._squared_sums / self._counts - means ** 2) - print 'means:' - print means - print 'stdevs:' - print stds + print('means:') + print(means) + print('stdevs:') + print(stds) # map up to original set of anchors labels = _unmap(labels, total_anchors, inds_inside, fill=-1) @@ -211,14 +211,14 @@ def forward(self, bottom, top): bbox_outside_weights = _unmap(bbox_outside_weights, total_anchors, inds_inside, fill=0) if DEBUG: - print 'rpn: max max_overlap', np.max(max_overlaps) - print 'rpn: num_positive', np.sum(labels == 1) - print 'rpn: num_negative', np.sum(labels == 0) + print('rpn: max max_overlap', np.max(max_overlaps)) + print('rpn: num_positive', np.sum(labels == 1)) + print('rpn: num_negative', np.sum(labels == 0)) self._fg_sum += 
np.sum(labels == 1) self._bg_sum += np.sum(labels == 0) self._count += 1 - print 'rpn: num_positive avg', self._fg_sum / self._count - print 'rpn: num_negative avg', self._bg_sum / self._count + print('rpn: num_positive avg', self._fg_sum / self._count) + print('rpn: num_negative avg', self._bg_sum / self._count) # labels labels = labels.reshape((1, height, width, A)).transpose(0, 3, 1, 2) diff --git a/lib/rpn/generate.py b/lib/rpn/generate.py index 060daf434..29707a836 100644 --- a/lib/rpn/generate.py +++ b/lib/rpn/generate.py @@ -106,8 +106,8 @@ def imdb_proposals(net, imdb): _t.tic() imdb_boxes[i], scores = im_proposals(net, im) _t.toc() - print 'im_proposals: {:d}/{:d} {:.3f}s' \ - .format(i + 1, imdb.num_images, _t.average_time) + print('im_proposals: {:d}/{:d} {:.3f}s' \ + .format(i + 1, imdb.num_images, _t.average_time)) if 0: dets = np.hstack((imdb_boxes[i], scores)) # from IPython import embed; embed() diff --git a/lib/rpn/generate_anchors.py b/lib/rpn/generate_anchors.py index 1125a801f..d9128746a 100644 --- a/lib/rpn/generate_anchors.py +++ b/lib/rpn/generate_anchors.py @@ -100,6 +100,6 @@ def _scale_enum(anchor, scales): import time t = time.time() a = generate_anchors() - print time.time() - t - print a + print(time.time() - t) + print(a) from IPython import embed; embed() diff --git a/lib/rpn/proposal_layer.py b/lib/rpn/proposal_layer.py index b157160b3..6ba42ad18 100644 --- a/lib/rpn/proposal_layer.py +++ b/lib/rpn/proposal_layer.py @@ -31,9 +31,9 @@ def setup(self, bottom, top): self._num_anchors = self._anchors.shape[0] if DEBUG: - print 'feat_stride: {}'.format(self._feat_stride) - print 'anchors:' - print self._anchors + print('feat_stride: {}'.format(self._feat_stride)) + print('anchors:') + print(self._anchors) # rois blob: holds R regions of interest, each is a 5-tuple # (n, x1, y1, x2, y2) specifying an image batch index n and a @@ -74,14 +74,14 @@ def forward(self, bottom, top): im_info = bottom[2].data[0, :] if DEBUG: - print 'im_size: ({}, {})'.format(im_info[0], im_info[1]) - print 'scale: {}'.format(im_info[2]) + print('im_size: ({}, {})'.format(im_info[0], im_info[1])) + print('scale: {}'.format(im_info[2])) # 1. 
Generate proposals from bbox deltas and shifted anchors
         height, width = scores.shape[-2:]
 
         if DEBUG:
-            print 'score map size: {}'.format(scores.shape)
+            print('score map size: {}'.format(scores.shape))
 
         # Enumerate all shifts
         shift_x = np.arange(0, width) * self._feat_stride
diff --git a/lib/rpn/proposal_target_layer.py b/lib/rpn/proposal_target_layer.py
index 38e1f2c88..331940989 100644
--- a/lib/rpn/proposal_target_layer.py
+++ b/lib/rpn/proposal_target_layer.py
@@ -66,14 +66,14 @@ def forward(self, bottom, top):
             rois_per_image, self._num_classes)
 
         if DEBUG:
-            print 'num fg: {}'.format((labels > 0).sum())
-            print 'num bg: {}'.format((labels == 0).sum())
+            print('num fg: {}'.format((labels > 0).sum()))
+            print('num bg: {}'.format((labels == 0).sum()))
             self._count += 1
             self._fg_num += (labels > 0).sum()
             self._bg_num += (labels == 0).sum()
-            print 'num fg avg: {}'.format(self._fg_num / self._count)
-            print 'num bg avg: {}'.format(self._bg_num / self._count)
-            print 'ratio: {:.3f}'.format(float(self._fg_num) / float(self._bg_num))
+            print('num fg avg: {}'.format(self._fg_num / self._count))
+            print('num bg avg: {}'.format(self._bg_num / self._count))
+            print('ratio: {:.3f}'.format(float(self._fg_num) / float(self._bg_num)))
 
         # sampled rois
         top[0].reshape(*rois.shape)
diff --git a/tools/compress_net.py b/tools/compress_net.py
index e044e5bcd..09ac77154 100755
--- a/tools/compress_net.py
+++ b/tools/compress_net.py
@@ -100,7 +100,7 @@ def main():
     # Compress fc7
     if net_svd.params.has_key('fc7_L'):
         l_fc7 = net_svd.params['fc7_L'][0].data.shape[0]
-        print ' fc7_L bottleneck size: {}'.format(l_fc7)
+        print(' fc7_L bottleneck size: {}'.format(l_fc7))
 
         W_fc7 = net.params['fc7'][0].data
         B_fc7 = net.params['fc7'][1].data
@@ -119,7 +119,7 @@ def main():
     filename = '{}/{}.caffemodel'.format(out_dir, out)
     net_svd.save(filename)
 
-    print 'Wrote svd model to: {:s}'.format(filename)
+    print('Wrote svd model to: {:s}'.format(filename))
 
 if __name__ == '__main__':
     main()
diff --git a/tools/demo.py b/tools/demo.py
index 631c68a41..03b358343 100755
--- a/tools/demo.py
+++ b/tools/demo.py
@@ -81,7 +81,7 @@ def demo(net, image_name):
     timer.tic()
     scores, boxes = im_detect(net, im)
     timer.toc()
-    print ('Detection took {:.3f}s for '
-           '{:d} object proposals').format(timer.total_time, boxes.shape[0])
+    print('Detection took {:.3f}s for '
+          '{:d} object proposals'.format(timer.total_time, boxes.shape[0]))
 
     # Visualize detections for each class
@@ -134,7 +134,7 @@ def parse_args():
         cfg.GPU_ID = args.gpu_id
     net = caffe.Net(prototxt, caffemodel, caffe.TEST)
 
-    print '\n\nLoaded network {:s}'.format(caffemodel)
+    print('\n\nLoaded network {:s}'.format(caffemodel))
 
     # Warmup on a dummy image
     im = 128 * np.ones((300, 500, 3), dtype=np.uint8)
@@ -144,8 +144,8 @@ def parse_args():
     im_names = ['000456.jpg', '000542.jpg', '001150.jpg',
                 '001763.jpg', '004545.jpg']
     for im_name in im_names:
-        print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
-        print 'Demo for data/demo/{}'.format(im_name)
+        print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
+        print('Demo for data/demo/{}'.format(im_name))
         demo(net, im_name)
 
     plt.show()
diff --git a/tools/eval_recall.py b/tools/eval_recall.py
index b1a59dc27..976c7c5d0 100755
--- a/tools/eval_recall.py
+++ b/tools/eval_recall.py
@@ -48,23 +48,23 @@ def parse_args():
     ar, gt_overlaps, recalls, thresholds = \
         imdb.evaluate_recall(candidate_boxes=candidate_boxes)
-    print 'Method: {}'.format(args.method)
-    print 'AverageRec: {:.3f}'.format(ar)
+    print('Method: {}'.format(args.method))
+    print('AverageRec: {:.3f}'.format(ar))
 
     def recall_at(t):
         ind = np.where(thresholds > t - 1e-5)[0][0]
         assert
np.isclose(thresholds[ind], t) return recalls[ind] - print 'Recall@0.5: {:.3f}'.format(recall_at(0.5)) - print 'Recall@0.6: {:.3f}'.format(recall_at(0.6)) - print 'Recall@0.7: {:.3f}'.format(recall_at(0.7)) - print 'Recall@0.8: {:.3f}'.format(recall_at(0.8)) - print 'Recall@0.9: {:.3f}'.format(recall_at(0.9)) + print('Recall@0.5: {:.3f}'.format(recall_at(0.5))) + print('Recall@0.6: {:.3f}'.format(recall_at(0.6))) + print('Recall@0.7: {:.3f}'.format(recall_at(0.7))) + print('Recall@0.8: {:.3f}'.format(recall_at(0.8))) + print('Recall@0.9: {:.3f}'.format(recall_at(0.9))) # print again for easy spreadsheet copying - print '{:.3f}'.format(ar) - print '{:.3f}'.format(recall_at(0.5)) - print '{:.3f}'.format(recall_at(0.6)) - print '{:.3f}'.format(recall_at(0.7)) - print '{:.3f}'.format(recall_at(0.8)) - print '{:.3f}'.format(recall_at(0.9)) + print('{:.3f}'.format(ar)) + print('{:.3f}'.format(recall_at(0.5))) + print('{:.3f}'.format(recall_at(0.6))) + print('{:.3f}'.format(recall_at(0.7))) + print('{:.3f}'.format(recall_at(0.8))) + print('{:.3f}'.format(recall_at(0.9))) diff --git a/tools/reval.py b/tools/reval.py index 905ec1b14..a8a97ba1e 100755 --- a/tools/reval.py +++ b/tools/reval.py @@ -50,12 +50,12 @@ def from_dets(imdb_name, output_dir, args): dets = cPickle.load(f) if args.apply_nms: - print 'Applying NMS to all detections' + print('Applying NMS to all detections') nms_dets = apply_nms(dets, cfg.TEST.NMS) else: nms_dets = dets - print 'Evaluating detections' + print('Evaluating detections') imdb.evaluate_detections(nms_dets, output_dir) if __name__ == '__main__': diff --git a/tools/rpn_generate.py b/tools/rpn_generate.py index f8ca4a167..332718f93 100755 --- a/tools/rpn_generate.py +++ b/tools/rpn_generate.py @@ -88,4 +88,4 @@ def parse_args(): rpn_file = os.path.join(output_dir, net.name + '_rpn_proposals.pkl') with open(rpn_file, 'wb') as f: cPickle.dump(imdb_boxes, f, cPickle.HIGHEST_PROTOCOL) - print 'Wrote RPN proposals to {}'.format(rpn_file) + print('Wrote RPN proposals to {}'.format(rpn_file)) diff --git a/tools/train_faster_rcnn_alt_opt.py b/tools/train_faster_rcnn_alt_opt.py index e49844a45..b7594bf75 100755 --- a/tools/train_faster_rcnn_alt_opt.py +++ b/tools/train_faster_rcnn_alt_opt.py @@ -59,9 +59,9 @@ def parse_args(): def get_roidb(imdb_name, rpn_file=None): imdb = get_imdb(imdb_name) - print 'Loaded dataset `{:s}` for training'.format(imdb.name) + print('Loaded dataset `{:s}` for training'.format(imdb.name)) imdb.set_proposal_method(cfg.TRAIN.PROPOSAL_METHOD) - print 'Set proposal method: {:s}'.format(cfg.TRAIN.PROPOSAL_METHOD) + print('Set proposal method: {:s}'.format(cfg.TRAIN.PROPOSAL_METHOD)) if rpn_file is not None: imdb.config['rpn_file'] = rpn_file roidb = get_training_roidb(imdb) @@ -112,7 +112,7 @@ def train_rpn(queue=None, imdb_name=None, init_model=None, solver=None, cfg.TRAIN.BBOX_REG = False # applies only to Fast R-CNN bbox regression cfg.TRAIN.PROPOSAL_METHOD = 'gt' cfg.TRAIN.IMS_PER_BATCH = 1 - print 'Init model: {}'.format(init_model) + print('Init model: {}'.format(init_model)) print('Using config:') pprint.pprint(cfg) @@ -120,9 +120,9 @@ def train_rpn(queue=None, imdb_name=None, init_model=None, solver=None, _init_caffe(cfg) roidb, imdb = get_roidb(imdb_name) - print 'roidb len: {}'.format(len(roidb)) + print('roidb len: {}'.format(len(roidb))) output_dir = get_output_dir(imdb) - print 'Output will be saved to `{:s}`'.format(output_dir) + print('Output will be saved to `{:s}`'.format(output_dir)) model_paths = train_net(solver, roidb, output_dir, 
pretrained_model=init_model, @@ -141,7 +141,7 @@ def rpn_generate(queue=None, imdb_name=None, rpn_model_path=None, cfg=None, cfg.TEST.RPN_PRE_NMS_TOP_N = -1 # no pre NMS filtering cfg.TEST.RPN_POST_NMS_TOP_N = 2000 # limit top boxes after NMS - print 'RPN model: {}'.format(rpn_model_path) + print('RPN model: {}'.format(rpn_model_path)) print('Using config:') pprint.pprint(cfg) @@ -152,12 +152,12 @@ def rpn_generate(queue=None, imdb_name=None, rpn_model_path=None, cfg=None, # We compute them on the image once and then flip the already computed # proposals. This might cause a minor loss in mAP (less proposal jittering). imdb = get_imdb(imdb_name) - print 'Loaded dataset `{:s}` for proposal generation'.format(imdb.name) + print('Loaded dataset `{:s}` for proposal generation'.format(imdb.name)) # Load RPN and configure output directory rpn_net = caffe.Net(rpn_test_prototxt, rpn_model_path, caffe.TEST) output_dir = get_output_dir(imdb) - print 'Output will be saved to `{:s}`'.format(output_dir) + print('Output will be saved to `{:s}`'.format(output_dir)) # Generate proposals on the imdb rpn_proposals = imdb_proposals(rpn_net, imdb) # Write proposals to disk and send the proposal file path through the @@ -167,7 +167,7 @@ def rpn_generate(queue=None, imdb_name=None, rpn_model_path=None, cfg=None, output_dir, rpn_net_name + '_proposals.pkl') with open(rpn_proposals_path, 'wb') as f: cPickle.dump(rpn_proposals, f, cPickle.HIGHEST_PROTOCOL) - print 'Wrote RPN proposals to {}'.format(rpn_proposals_path) + print('Wrote RPN proposals to {}'.format(rpn_proposals_path)) queue.put({'proposal_path': rpn_proposals_path}) def train_fast_rcnn(queue=None, imdb_name=None, init_model=None, solver=None, @@ -178,8 +178,8 @@ def train_fast_rcnn(queue=None, imdb_name=None, init_model=None, solver=None, cfg.TRAIN.HAS_RPN = False # not generating prosals on-the-fly cfg.TRAIN.PROPOSAL_METHOD = 'rpn' # use pre-computed RPN proposals instead cfg.TRAIN.IMS_PER_BATCH = 2 - print 'Init model: {}'.format(init_model) - print 'RPN proposals: {}'.format(rpn_file) + print('Init model: {}'.format(init_model)) + print('RPN proposals: {}'.format(rpn_file)) print('Using config:') pprint.pprint(cfg) @@ -188,7 +188,7 @@ def train_fast_rcnn(queue=None, imdb_name=None, init_model=None, solver=None, roidb, imdb = get_roidb(imdb_name, rpn_file=rpn_file) output_dir = get_output_dir(imdb) - print 'Output will be saved to `{:s}`'.format(output_dir) + print('Output will be saved to `{:s}`'.format(output_dir)) # Train Fast R-CNN model_paths = train_net(solver, roidb, output_dir, pretrained_model=init_model, @@ -224,9 +224,9 @@ def train_fast_rcnn(queue=None, imdb_name=None, init_model=None, solver=None, # solves, iters, etc. 
for each training stage solvers, max_iters, rpn_test_prototxt = get_solvers(args.net_name) - print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' - print 'Stage 1 RPN, init from ImageNet model' - print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' + print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~') + print('Stage 1 RPN, init from ImageNet model') + print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~') cfg.TRAIN.SNAPSHOT_INFIX = 'stage1' mp_kwargs = dict( @@ -241,9 +241,9 @@ def train_fast_rcnn(queue=None, imdb_name=None, init_model=None, solver=None, rpn_stage1_out = mp_queue.get() p.join() - print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' - print 'Stage 1 RPN, generate proposals' - print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' + print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~') + print('Stage 1 RPN, generate proposals') + print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~') mp_kwargs = dict( queue=mp_queue, @@ -256,9 +256,9 @@ def train_fast_rcnn(queue=None, imdb_name=None, init_model=None, solver=None, rpn_stage1_out['proposal_path'] = mp_queue.get()['proposal_path'] p.join() - print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' - print 'Stage 1 Fast R-CNN using RPN proposals, init from ImageNet model' - print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' + print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~') + print('Stage 1 Fast R-CNN using RPN proposals, init from ImageNet model') + print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~') cfg.TRAIN.SNAPSHOT_INFIX = 'stage1' mp_kwargs = dict( @@ -274,9 +274,9 @@ def train_fast_rcnn(queue=None, imdb_name=None, init_model=None, solver=None, fast_rcnn_stage1_out = mp_queue.get() p.join() - print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' - print 'Stage 2 RPN, init from stage 1 Fast R-CNN model' - print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' + print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~') + print('Stage 2 RPN, init from stage 1 Fast R-CNN model') + print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~') cfg.TRAIN.SNAPSHOT_INFIX = 'stage2' mp_kwargs = dict( @@ -291,9 +291,9 @@ def train_fast_rcnn(queue=None, imdb_name=None, init_model=None, solver=None, rpn_stage2_out = mp_queue.get() p.join() - print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' - print 'Stage 2 RPN, generate proposals' - print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' + print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~') + print('Stage 2 RPN, generate proposals') + print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~') mp_kwargs = dict( queue=mp_queue, @@ -306,9 +306,9 @@ def train_fast_rcnn(queue=None, imdb_name=None, init_model=None, solver=None, rpn_stage2_out['proposal_path'] = mp_queue.get()['proposal_path'] p.join() - print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' - print 'Stage 2 Fast R-CNN, init from stage 2 RPN R-CNN model' - print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' + print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~') + print('Stage 2 Fast R-CNN, init from stage 2 RPN R-CNN model') + print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~') cfg.TRAIN.SNAPSHOT_INFIX = 'stage2' mp_kwargs = dict( @@ -328,7 +328,7 @@ def train_fast_rcnn(queue=None, imdb_name=None, 
init_model=None, solver=None, final_path = os.path.join( os.path.dirname(fast_rcnn_stage2_out['model_path']), args.net_name + '_faster_rcnn_final.caffemodel') - print 'cp {} -> {}'.format( - fast_rcnn_stage2_out['model_path'], final_path) + print('cp {} -> {}'.format( + fast_rcnn_stage2_out['model_path'], final_path)) shutil.copy(fast_rcnn_stage2_out['model_path'], final_path) - print 'Final model: {}'.format(final_path) + print('Final model: {}'.format(final_path)) diff --git a/tools/train_net.py b/tools/train_net.py index 622a95d68..9f96169d4 100755 --- a/tools/train_net.py +++ b/tools/train_net.py @@ -60,9 +60,9 @@ def parse_args(): def combined_roidb(imdb_names): def get_roidb(imdb_name): imdb = get_imdb(imdb_name) - print 'Loaded dataset `{:s}` for training'.format(imdb.name) + print('Loaded dataset `{:s}` for training'.format(imdb.name)) imdb.set_proposal_method(cfg.TRAIN.PROPOSAL_METHOD) - print 'Set proposal method: {:s}'.format(cfg.TRAIN.PROPOSAL_METHOD) + print('Set proposal method: {:s}'.format(cfg.TRAIN.PROPOSAL_METHOD)) roidb = get_training_roidb(imdb) return roidb @@ -102,10 +102,10 @@ def get_roidb(imdb_name): caffe.set_device(args.gpu_id) imdb, roidb = combined_roidb(args.imdb_name) - print '{:d} roidb entries'.format(len(roidb)) + print('{:d} roidb entries'.format(len(roidb))) output_dir = get_output_dir(imdb) - print 'Output will be saved to `{:s}`'.format(output_dir) + print('Output will be saved to `{:s}`'.format(output_dir)) train_net(args.solver, roidb, output_dir, pretrained_model=args.pretrained_model, diff --git a/tools/train_svms.py b/tools/train_svms.py index 498bbf2a2..584775f9c 100755 --- a/tools/train_svms.py +++ b/tools/train_svms.py @@ -108,8 +108,8 @@ def get_pos_examples(self): cls_feat = feat[cls_inds, :] self.trainers[j].append_pos(cls_feat) - print 'get_pos_examples: {:d}/{:d} {:.3f}s' \ - .format(i + 1, len(roidb), _t.average_time) + print('get_pos_examples: {:d}/{:d} {:.3f}s' \ + .format(i + 1, len(roidb), _t.average_time)) def initialize_net(self): # Start all SVM parameters at zero @@ -338,16 +338,16 @@ def parse_args(): out_dir = os.path.dirname(args.caffemodel) imdb = get_imdb(args.imdb_name) - print 'Loaded dataset `{:s}` for training'.format(imdb.name) + print('Loaded dataset `{:s}` for training'.format(imdb.name)) # enhance roidb to contain flipped examples if cfg.TRAIN.USE_FLIPPED: - print 'Appending horizontally-flipped training examples...' + print('Appending horizontally-flipped training examples...') imdb.append_flipped_images() - print 'done' + print('done') SVMTrainer(net, imdb).train() filename = '{}/{}.caffemodel'.format(out_dir, out) net.save(filename) - print 'Wrote svm model to: {:s}'.format(filename) + print('Wrote svm model to: {:s}'.format(filename))
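
Note appended in editing, not part of the commit above: the patch only rewrites print statements as print() calls, which is the minimal change needed for Python 3. Two caveats are worth recording. First, the few converted calls that pass several arguments (for example print('RoiDataLayer: name_to_top:', self._name_to_top_map) in lib/roi_data_layer/layer.py and print('class: ', cls, ' overlap: ', overlaps[i]) in lib/roi_data_layer/minibatch.py) will print a tuple if the code is still run under Python 2, because there print(...) is the print statement applied to a parenthesized expression. If dual Python 2/3 support is wanted, the usual remedy is a future import at the top of each touched module; a minimal sketch, assuming no other changes to these files:

    # Sketch only, not part of this patch: keep the converted modules working
    # under Python 2 as well as Python 3.
    from __future__ import print_function  # print() is a function on both versions

    try:                          # the files above import cPickle directly,
        import cPickle            # which exists only on Python 2 ...
    except ImportError:
        import pickle as cPickle  # ... so fall back to pickle under the same name

Second, other Python-2-only constructs visible in the surrounding context (xrange, dict.has_key, urllib.urlretrieve) are untouched by this patch and would still need separate changes for a full Python 3 port.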