Change print statements to be compatible with Python3 #849

Open · wants to merge 1 commit into base: master
30 changes: 15 additions & 15 deletions lib/datasets/coco.py
@@ -141,8 +141,8 @@ def _roidb_from_proposals(self, method):
         if osp.exists(cache_file):
             with open(cache_file, 'rb') as fid:
                 roidb = cPickle.load(fid)
-            print '{:s} {:s} roidb loaded from {:s}'.format(self.name, method,
-                                                            cache_file)
+            print('{:s} {:s} roidb loaded from {:s}'.format(self.name, method,
+                                                            cache_file))
             return roidb

         if self._image_set in self._gt_splits:
@@ -155,7 +155,7 @@ def _roidb_from_proposals(self, method):
             roidb = self._load_proposals(method, None)
         with open(cache_file, 'wb') as fid:
             cPickle.dump(roidb, fid, cPickle.HIGHEST_PROTOCOL)
-        print 'wrote {:s} roidb to {:s}'.format(method, cache_file)
+        print('wrote {:s} roidb to {:s}'.format(method, cache_file))
         return roidb

     def _load_proposals(self, method, gt_roidb):
@@ -177,10 +177,10 @@ def _load_proposals(self, method, gt_roidb):
             'edge_boxes_70']
         assert method in valid_methods

-        print 'Loading {} boxes'.format(method)
+        print('Loading {} boxes'.format(method))
         for i, index in enumerate(self._image_index):
             if i % 1000 == 0:
-                print '{:d} / {:d}'.format(i + 1, len(self._image_index))
+                print('{:d} / {:d}'.format(i + 1, len(self._image_index)))

             box_file = osp.join(
                 cfg.DATA_DIR, 'coco_proposals', method, 'mat',
@@ -214,15 +214,15 @@ def gt_roidb(self):
         if osp.exists(cache_file):
             with open(cache_file, 'rb') as fid:
                 roidb = cPickle.load(fid)
-            print '{} gt roidb loaded from {}'.format(self.name, cache_file)
+            print('{} gt roidb loaded from {}'.format(self.name, cache_file))
             return roidb

         gt_roidb = [self._load_coco_annotation(index)
                     for index in self._image_index]

         with open(cache_file, 'wb') as fid:
             cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)
-        print 'wrote gt roidb to {}'.format(cache_file)
+        print('wrote gt roidb to {}'.format(cache_file))
         return gt_roidb

     def _load_coco_annotation(self, index):
@@ -306,18 +306,18 @@ def _get_thr_ind(coco_eval, thr):
         precision = \
             coco_eval.eval['precision'][ind_lo:(ind_hi + 1), :, :, 0, 2]
         ap_default = np.mean(precision[precision > -1])
-        print ('~~~~ Mean and per-category AP @ IoU=[{:.2f},{:.2f}] '
+        print('~~~~ Mean and per-category AP @ IoU=[{:.2f},{:.2f}] '
               '~~~~').format(IoU_lo_thresh, IoU_hi_thresh)
-        print '{:.1f}'.format(100 * ap_default)
+        print('{:.1f}'.format(100 * ap_default))
         for cls_ind, cls in enumerate(self.classes):
             if cls == '__background__':
                 continue
             # minus 1 because of __background__
             precision = coco_eval.eval['precision'][ind_lo:(ind_hi + 1), :, cls_ind - 1, 0, 2]
             ap = np.mean(precision[precision > -1])
-            print '{:.1f}'.format(100 * ap)
+            print('{:.1f}'.format(100 * ap))

-        print '~~~~ Summary metrics ~~~~'
+        print('~~~~ Summary metrics ~~~~')
         coco_eval.summarize()

     def _do_detection_eval(self, res_file, output_dir):
@@ -331,7 +331,7 @@ def _do_detection_eval(self, res_file, output_dir):
         eval_file = osp.join(output_dir, 'detection_results.pkl')
         with open(eval_file, 'wb') as fid:
             cPickle.dump(coco_eval, fid, cPickle.HIGHEST_PROTOCOL)
-        print 'Wrote COCO eval results to: {}'.format(eval_file)
+        print('Wrote COCO eval results to: {}'.format(eval_file))

     def _coco_results_one_category(self, boxes, cat_id):
         results = []
@@ -360,12 +360,12 @@ def _write_coco_results_file(self, all_boxes, res_file):
         for cls_ind, cls in enumerate(self.classes):
             if cls == '__background__':
                 continue
-            print 'Collecting {} results ({:d}/{:d})'.format(cls, cls_ind,
-                                                             self.num_classes - 1)
+            print('Collecting {} results ({:d}/{:d})'.format(cls, cls_ind,
+                                                             self.num_classes - 1))
             coco_cat_id = self._class_to_coco_cat_id[cls]
             results.extend(self._coco_results_one_category(all_boxes[cls_ind],
                                                            coco_cat_id))
-        print 'Writing results json to {}'.format(res_file)
+        print('Writing results json to {}'.format(res_file))
         with open(res_file, 'w') as fid:
             json.dump(results, fid)

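Note on the `@@ -306,18` hunk above: the continuation line `'~~~~').format(IoU_lo_thresh, IoU_hi_thresh)` is left outside the new parentheses, so under Python 3 the `.format(...)` is applied to the return value of `print()`, which is `None`. A minimal sketch of the failure and one possible fix, with placeholder thresholds standing in for `IoU_lo_thresh` and `IoU_hi_thresh`:

```python
# As converted above, .format() is called on print()'s return value,
# and print() returns None on Python 3:
#     print('AP @ IoU=[{:.2f},{:.2f}] ').format(0.5, 0.95)
#     AttributeError: 'NoneType' object has no attribute 'format'

# A possible fix (not part of this diff): keep .format() inside the call.
print('~~~~ Mean and per-category AP @ IoU=[{:.2f},{:.2f}] '
      '~~~~'.format(0.5, 0.95))
```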
24 changes: 12 additions & 12 deletions lib/datasets/pascal_voc.py
@@ -100,14 +100,14 @@ def gt_roidb(self):
         if os.path.exists(cache_file):
             with open(cache_file, 'rb') as fid:
                 roidb = cPickle.load(fid)
-            print '{} gt roidb loaded from {}'.format(self.name, cache_file)
+            print('{} gt roidb loaded from {}'.format(self.name, cache_file))
             return roidb

         gt_roidb = [self._load_pascal_annotation(index)
                     for index in self.image_index]
         with open(cache_file, 'wb') as fid:
             cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)
-        print 'wrote gt roidb to {}'.format(cache_file)
+        print('wrote gt roidb to {}'.format(cache_file))

         return gt_roidb

@@ -124,7 +124,7 @@ def selective_search_roidb(self):
         if os.path.exists(cache_file):
             with open(cache_file, 'rb') as fid:
                 roidb = cPickle.load(fid)
-            print '{} ss roidb loaded from {}'.format(self.name, cache_file)
+            print('{} ss roidb loaded from {}'.format(self.name, cache_file))
             return roidb

         if int(self._year) == 2007 or self._image_set != 'test':
@@ -135,7 +135,7 @@ def selective_search_roidb(self):
             roidb = self._load_selective_search_roidb(None)
         with open(cache_file, 'wb') as fid:
             cPickle.dump(roidb, fid, cPickle.HIGHEST_PROTOCOL)
-        print 'wrote ss roidb to {}'.format(cache_file)
+        print('wrote ss roidb to {}'.format(cache_file))

         return roidb

@@ -151,7 +151,7 @@ def rpn_roidb(self):

     def _load_rpn_roidb(self, gt_roidb):
         filename = self.config['rpn_file']
-        print 'loading {}'.format(filename)
+        print('loading {}'.format(filename))
         assert os.path.exists(filename), \
                'rpn data not found at: {}'.format(filename)
         with open(filename, 'rb') as f:
@@ -190,8 +190,8 @@ def _load_pascal_annotation(self, index):
             non_diff_objs = [
                 obj for obj in objs if int(obj.find('difficult').text) == 0]
             # if len(non_diff_objs) != len(objs):
-            #     print 'Removed {} difficult objects'.format(
-            #         len(objs) - len(non_diff_objs))
+            #     print('Removed {} difficult objects'.format(
+            #         len(objs) - len(non_diff_objs)))
             objs = non_diff_objs
         num_objs = len(objs)

@@ -243,7 +243,7 @@ def _write_voc_results_file(self, all_boxes):
         for cls_ind, cls in enumerate(self.classes):
             if cls == '__background__':
                 continue
-            print 'Writing {} VOC results file'.format(cls)
+            print('Writing {} VOC results file'.format(cls))
             filename = self._get_voc_results_file_template().format(cls)
             with open(filename, 'wt') as f:
                 for im_ind, index in enumerate(self.image_index):
@@ -273,7 +273,7 @@ def _do_python_eval(self, output_dir = 'output'):
         aps = []
         # The PASCAL VOC metric changed in 2010
         use_07_metric = True if int(self._year) < 2010 else False
-        print 'VOC07 metric? ' + ('Yes' if use_07_metric else 'No')
+        print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
         if not os.path.isdir(output_dir):
             os.mkdir(output_dir)
         for i, cls in enumerate(self._classes):
@@ -303,9 +303,9 @@ def _do_python_eval(self, output_dir = 'output'):
         print('--------------------------------------------------------------')

     def _do_matlab_eval(self, output_dir='output'):
-        print '-----------------------------------------------------'
-        print 'Computing results with the official MATLAB eval code.'
-        print '-----------------------------------------------------'
+        print('-----------------------------------------------------')
+        print('Computing results with the official MATLAB eval code.')
+        print('-----------------------------------------------------')
         path = os.path.join(cfg.ROOT_DIR, 'lib', 'datasets',
                             'VOCdevkit-matlab-wrapper')
         cmd = 'cd {} && '.format(path)
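The single-argument calls above also parse fine under Python 2 (the parentheses are read as grouping), so this file stays bilingual. Anything that later prints multiple arguments, though, would print a tuple on Python 2; the usual guard for code that must run on both interpreters is the `__future__` import. A sketch, assuming one wanted to add it (this PR does not):

```python
from __future__ import print_function  # must come before other statements

# With the future import, print is a function on Python 2 as well, so a
# multi-argument call prints 'wrote gt roidb to /tmp/cache.pkl' on both
# interpreters instead of a 2-tuple on Python 2:
print('wrote gt roidb to', '/tmp/cache.pkl')
```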
2 changes: 1 addition & 1 deletion lib/datasets/tools/mcg_munge.py
@@ -28,7 +28,7 @@ def munge(src_dir):
             os.makedirs(dst_dir)
         src = os.path.join(src_dir, fn)
         dst = os.path.join(dst_dir, fn)
-        print 'MV: {} -> {}'.format(src, dst)
+        print('MV: {} -> {}'.format(src, dst))
         os.rename(src, dst)

 if __name__ == '__main__':
6 changes: 3 additions & 3 deletions lib/datasets/voc_eval.py
@@ -108,10 +108,10 @@ def voc_eval(detpath,
         for i, imagename in enumerate(imagenames):
             recs[imagename] = parse_rec(annopath.format(imagename))
             if i % 100 == 0:
-                print 'Reading annotation for {:d}/{:d}'.format(
-                    i + 1, len(imagenames))
+                print('Reading annotation for {:d}/{:d}'.format(
+                    i + 1, len(imagenames)))
         # save
-        print 'Saving cached annotations to {:s}'.format(cachefile)
+        print('Saving cached annotations to {:s}'.format(cachefile))
         with open(cachefile, 'w') as f:
             cPickle.dump(recs, f)
     else:
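Converting the prints does not by itself make voc_eval.py run on Python 3: the module still uses `cPickle`, which Python 3 removed, and the hunk above writes the cache through a text-mode handle (`open(cachefile, 'w')`), which Python 3's pickle rejects. A compatibility sketch under the assumption that the imports and the open mode were updated too (this PR touches neither):

```python
try:
    import cPickle as pickle   # Python 2: the fast C implementation
except ImportError:
    import pickle              # Python 3: the C accelerator is built in

recs = {'000001': [{'name': 'dog', 'difficult': 0}]}  # toy annotation dict

# Python 3's pickle requires a binary-mode file, so 'w' must become 'wb':
with open('annots.pkl', 'wb') as f:
    pickle.dump(recs, f, pickle.HIGHEST_PROTOCOL)
```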
6 changes: 3 additions & 3 deletions lib/fast_rcnn/test.py
@@ -283,13 +283,13 @@ def test_net(net, imdb, max_per_image=100, thresh=0.05, vis=False):
                         all_boxes[j][i] = all_boxes[j][i][keep, :]
         _t['misc'].toc()

-        print 'im_detect: {:d}/{:d} {:.3f}s {:.3f}s' \
+        print('im_detect: {:d}/{:d} {:.3f}s {:.3f}s' \
               .format(i + 1, num_images, _t['im_detect'].average_time,
-                      _t['misc'].average_time)
+                      _t['misc'].average_time))

     det_file = os.path.join(output_dir, 'detections.pkl')
     with open(det_file, 'wb') as f:
         cPickle.dump(all_boxes, f, cPickle.HIGHEST_PROTOCOL)

-    print 'Evaluating detections'
+    print('Evaluating detections')
     imdb.evaluate_detections(all_boxes, output_dir)
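Style note on the first hunk: inside the new parentheses the trailing backslash is redundant, since the expression is already bracketed. An equivalent form, with placeholder values standing in for the counters and timers:

```python
# No line-continuation backslash needed once the string sits inside print():
print('im_detect: {:d}/{:d} {:.3f}s {:.3f}s'
      .format(1, 100, 0.123, 0.045))
```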
28 changes: 14 additions & 14 deletions lib/fast_rcnn/train.py
@@ -35,15 +35,15 @@ def __init__(self, solver_prototxt, roidb, output_dir,
             assert cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED

         if cfg.TRAIN.BBOX_REG:
-            print 'Computing bounding-box regression targets...'
+            print('Computing bounding-box regression targets...')
             self.bbox_means, self.bbox_stds = \
                     rdl_roidb.add_bbox_regression_targets(roidb)
-            print 'done'
+            print('done')

         self.solver = caffe.SGDSolver(solver_prototxt)
         if pretrained_model is not None:
-            print ('Loading pretrained model '
-                   'weights from {:s}').format(pretrained_model)
+            print('Loading pretrained model '
+                  'weights from {:s}'.format(pretrained_model))
             self.solver.net.copy_from(pretrained_model)

         self.solver_param = caffe_pb2.SolverParameter()
@@ -82,7 +82,7 @@ def snapshot(self):
         filename = os.path.join(self.output_dir, filename)

         net.save(str(filename))
-        print 'Wrote snapshot to: {:s}'.format(filename)
+        print('Wrote snapshot to: {:s}'.format(filename))

         if scale_bbox_params:
             # restore net to original state
@@ -101,7 +101,7 @@ def train_model(self, max_iters):
             self.solver.step(1)
             timer.toc()
             if self.solver.iter % (10 * self.solver_param.display) == 0:
-                print 'speed: {:.3f}s / iter'.format(timer.average_time)
+                print('speed: {:.3f}s / iter'.format(timer.average_time))

             if self.solver.iter % cfg.TRAIN.SNAPSHOT_ITERS == 0:
                 last_snapshot_iter = self.solver.iter
@@ -114,13 +114,13 @@ def get_training_roidb(imdb):
 def get_training_roidb(imdb):
     """Returns a roidb (Region of Interest database) for use in training."""
     if cfg.TRAIN.USE_FLIPPED:
-        print 'Appending horizontally-flipped training examples...'
+        print('Appending horizontally-flipped training examples...')
         imdb.append_flipped_images()
-        print 'done'
+        print('done')

-    print 'Preparing training data...'
+    print('Preparing training data...')
     rdl_roidb.prepare_roidb(imdb)
-    print 'done'
+    print('done')

     return imdb.roidb

@@ -144,8 +144,8 @@ def is_valid(entry):
     num = len(roidb)
     filtered_roidb = [entry for entry in roidb if is_valid(entry)]
     num_after = len(filtered_roidb)
-    print 'Filtered {} roidb entries: {} -> {}'.format(num - num_after,
-                                                       num, num_after)
+    print('Filtered {} roidb entries: {} -> {}'.format(num - num_after,
+                                                       num, num_after))
     return filtered_roidb

 def train_net(solver_prototxt, roidb, output_dir,
@@ -156,7 +156,7 @@ def train_net(solver_prototxt, roidb, output_dir,
     sw = SolverWrapper(solver_prototxt, roidb, output_dir,
                        pretrained_model=pretrained_model)

-    print 'Solving...'
+    print('Solving...')
     model_paths = sw.train_model(max_iters)
-    print 'done solving'
+    print('done solving')
     return model_paths
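Changes of this shape can also be produced and verified mechanically: the stdlib `2to3` tool ships a dedicated `print` fixer, and byte-compiling under Python 3 fails fast on any print statement left behind, since those are syntax errors there. A quick check, with hypothetical paths standing in for the touched files:

```python
import py_compile

# Any remaining Python 2 print statement raises py_compile.PyCompileError
# when this script is run under Python 3:
for path in ['lib/fast_rcnn/train.py', 'lib/fast_rcnn/test.py',
             'lib/datasets/pascal_voc.py']:
    py_compile.compile(path, doraise=True)
```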
20 changes: 10 additions & 10 deletions lib/pycocotools/coco.py
@@ -74,16 +74,16 @@ def __init__(self, annotation_file=None):
         self.imgs = {}
         self.cats = {}
         if not annotation_file == None:
-            print 'loading annotations into memory...'
+            print('loading annotations into memory...')
             tic = time.time()
             dataset = json.load(open(annotation_file, 'r'))
-            print 'Done (t=%0.2fs)'%(time.time()- tic)
+            print('Done (t=%0.2fs)'%(time.time()- tic))
             self.dataset = dataset
             self.createIndex()

     def createIndex(self):
         # create index
-        print 'creating index...'
+        print('creating index...')
         anns = {}
         imgToAnns = {}
         catToImgs = {}
@@ -110,7 +110,7 @@ def createIndex(self):
             for ann in self.dataset['annotations']:
                 catToImgs[ann['category_id']] += [ann['image_id']]

-        print 'index created!'
+        print('index created!')

         # create class members
         self.anns = anns
@@ -125,7 +125,7 @@ def info(self):
         :return:
         """
         for key, value in self.dataset['info'].items():
-            print '%s: %s'%(key, value)
+            print('%s: %s'%(key, value))

     def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None):
         """
@@ -276,7 +276,7 @@ def showAnns(self, anns):
                 ax.add_collection(p)
         elif datasetType == 'captions':
             for ann in anns:
-                print ann['caption']
+                print(ann['caption'])

     def loadRes(self, resFile):
         """
@@ -289,7 +289,7 @@ def loadRes(self, resFile):
         # res.dataset['info'] = copy.deepcopy(self.dataset['info'])
         # res.dataset['licenses'] = copy.deepcopy(self.dataset['licenses'])

-        print 'Loading and preparing results... '
+        print('Loading and preparing results... ')
         tic = time.time()
         anns = json.load(open(resFile))
         assert type(anns) == list, 'results in not an array of objects'
@@ -320,7 +320,7 @@ def loadRes(self, resFile):
                 ann['bbox'] = mask.toBbox([ann['segmentation']])[0]
                 ann['id'] = id+1
                 ann['iscrowd'] = 0
-        print 'DONE (t=%0.2fs)'%(time.time()- tic)
+        print('DONE (t=%0.2fs)'%(time.time()- tic))

         res.dataset['annotations'] = anns
         res.createIndex()
@@ -334,7 +334,7 @@ def download( self, tarDir = None, imgIds = [] ):
         :return:
         '''
         if tarDir is None:
-            print 'Please specify target directory'
+            print('Please specify target directory')
             return -1
         if len(imgIds) == 0:
             imgs = self.imgs.values()
@@ -348,4 +348,4 @@ def download( self, tarDir = None, imgIds = [] ):
             fname = os.path.join(tarDir, img['file_name'])
             if not os.path.exists(fname):
                 urllib.urlretrieve(img['coco_url'], fname)
-            print 'downloaded %d/%d images (t=%.1fs)'%(i, N, time.time()- tic)
+            print('downloaded %d/%d images (t=%.1fs)'%(i, N, time.time()- tic))
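Same caveat as with `cPickle`: `download()` above still calls `urllib.urlretrieve`, which only exists on Python 2; Python 3 moved it to `urllib.request`. A sketch of a shim in the same spirit as the print changes (the URL is an illustrative COCO image link, and this PR leaves the urllib call untouched):

```python
try:
    from urllib import urlretrieve            # Python 2
except ImportError:
    from urllib.request import urlretrieve    # Python 3

# Fetch a single image the way COCO.download() does, one file per URL:
urlretrieve('http://images.cocodataset.org/val2017/000000039769.jpg',
            '000000039769.jpg')
```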