Merge pull request #13 from swartn/py3
Automatic conversion to Python 3 using 2to3
swartn authored Aug 6, 2019
2 parents 65fb987 + 0ffd468 commit 2f37d91
Showing 4 changed files with 44 additions and 44 deletions.
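
The diff below was generated mechanically with the standard-library 2to3 tool. The exact invocation is not recorded in the commit, but a typical in-place run over the package (hypothetical command line, assuming the repository root as the working directory) would be:

    2to3 -w cmipdata/

The resulting changes correspond to three 2to3 fixers: print (print statements become print() calls), import (implicit relative imports become explicit, e.g. from .classes import *), and dict (iteritems() becomes items(), with some items() calls wrapped in list()).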
12 changes: 6 additions & 6 deletions cmipdata/__init__.py
@@ -8,17 +8,17 @@

# __all__ = ["join_exp_slice", "zonmean", "loaddata", "match_exp", "remap_timelim", "remap_cmip_nc" ,"mload1d", "climatology", "areaint"]

from classes import *
from preprocessing_tools import *
from .classes import *
from .preprocessing_tools import *

# Requires cdo python bindings and netcdf4
try:
from loading_tools import *
from .loading_tools import *
except ImportError:
print 'Could not import loading_tools. Check that the correct versions of cdo, numpy, and netCDF4 are installed.'
print('Could not import loading_tools. Check that the correct versions of cdo, numpy, and netCDF4 are installed.')

# Requires matplotlib
try:
from plotting_tools import *
from .plotting_tools import *
except ImportError:
print 'Could not import plotting_tools. Check that the correct versions of cdo, numpy, scipy, Basemap, matplotlib and netCDF4 are installed.'
print('Could not import plotting_tools. Check that the correct versions of cdo, numpy, scipy, Basemap, matplotlib and netCDF4 are installed.')
26 changes: 13 additions & 13 deletions cmipdata/classes.py
@@ -60,7 +60,7 @@ def __init__(self, genre, name, parent=None, **kwargs):
self.name = name
self.children = []
self.parent = parent
for k,v in kwargs.items():
for k,v in list(kwargs.items()):
setattr(self, k, v)

def add(self, child):
@@ -208,7 +208,7 @@ def sq(node):
if node.genre != 'ensemble':
node = node.parent
node.delete(delete)
print 'Removing ' + delete.name + ' from ' + delete.parent.name
print('Removing ' + delete.name + ' from ' + delete.parent.name)
sq(node)
for n in node.children:
sq(n)
@@ -230,27 +230,27 @@ def getDictionary(self):
def sinfo(self, listOfGenres=['variable', 'model', 'experiment', 'realization', 'ncfile']):
""" Returns the number of models, experiments, realizations, variables and files
in the DataNode"""
print "This ensemble contains:"
print("This ensemble contains:")
for key in listOfGenres:
if key == 'realization':
print str(len(list(self.objects(key)))) + " " + key + "s"
print(str(len(list(self.objects(key)))) + " " + key + "s")
else:
print str(len(self.lister(key))) + " " + key + "s"
print(str(len(self.lister(key))) + " " + key + "s")

def fulldetails(self):
""" prints information about the number of models,
experiments, variables and files ina DataNode tree.
"""
for model in self.children:
print model.name + ':'
print(model.name + ':')
for experiment in model.children:
print '\t' + experiment.name
print('\t' + experiment.name)
for realization in experiment.children:
print '\t\t' + realization.name
print('\t\t' + realization.name)
for variable in realization.children:
print '\t\t\t' + variable.name
print('\t\t\t' + variable.name)
for filename in variable.children:
print '\t\t\t\t' + filename.name
print('\t\t\t\t' + filename.name)

def fulldetails_tofile(self, fi):
""" prints information about the number of models,
@@ -414,7 +414,7 @@ def match_models(ens1, ens2, delete=False):
files = m.lister('ncfile')
for f in files:
os.system('rm -f ' + f)
print 'deleting %s from ens1' % (m.name)
print('deleting %s from ens1' % (m.name))
ens1.delete(m)

m = ens2.getChild(name)
@@ -423,7 +423,7 @@
files = m.lister('ncfile')
for f in files:
os.system('rm -f ' + f)
print 'deleting %s from ens2' % (m.name)
print('deleting %s from ens2' % (m.name))
ens2.delete(m)

ens1.squeeze()
@@ -463,7 +463,7 @@ def match_realizations(ens1, ens2, delete=False):
matches = set(mer_string_e1).intersection(mer_string_e2)
misses = set(mer_string_e1).symmetric_difference(mer_string_e2)

print 'misses:', len(misses), 'matches:', len(matches)
print('misses:', len(misses), 'matches:', len(matches))

# delete realizations not in both ensembles
def deleting(items):
2 changes: 1 addition & 1 deletion cmipdata/plotting_tools.py
@@ -14,7 +14,7 @@
.. moduleauthor:: Neil Swart <[email protected]>
"""
import matplotlib.pyplot as plt
from loading_tools import loadvar, get_dimensions
from .loading_tools import loadvar, get_dimensions
import scipy as sp
from scipy import stats
import numpy as np
48 changes: 24 additions & 24 deletions cmipdata/preprocessing_tools.py
@@ -9,7 +9,7 @@
"""
import os
import glob
import classes as dc
from . import classes as dc
import copy
import itertools

@@ -95,7 +95,7 @@ def cat_exp_slices(ensemble, delete=True, output_prefix=''):
enddates = [f.end_date for f in files]
# check if there are multiple files
if len(modfiles) > 1:
print 'joining files'
print('joining files')
infiles = ' '.join(modfiles)
outfile = (output_prefix +
os.path.split(files[0].getNameWithoutDates())[1] + '_' +
@@ -106,7 +106,7 @@
catstring = 'cdo mergetime ' + infiles + ' ' + outfile
os.system(catstring)
else:
print outfile + ' already exists.'
print(outfile + ' already exists.')
f = dc.DataNode('ncfile', outfile, parent=var, start_date=min(startdates), end_date=max(enddates))
var.children = [f]

@@ -244,7 +244,7 @@ def cat_experiments(ensemble, variable_name, exp1_name, exp2_name, delete=True,
out_enddate + '.nc')

# do the concatenation using CDO
print "\n join " + model.name + '_' + e1r.name + ' ' + e1.name + ' to ' + e2.name
print("\n join " + model.name + '_' + e1r.name + ' ' + e1.name + ' to ' + e2.name)
catstring = ('cdo mergetime ' + infiles + ' ' + outfile)

os.system(catstring)
@@ -283,17 +283,17 @@ def cat_experiments(ensemble, variable_name, exp1_name, exp2_name, delete=True,
os.system(delstr)

# Remove models with missing experiments from ens, and then return ens
print ' \n\n Models deleted from ensemble (missing one experiment completely): \n'
print '\t Model \t Experiment \n'
print(' \n\n Models deleted from ensemble (missing one experiment completely): \n')
print('\t Model \t Experiment \n')

for model_name, missing_experiment in models_to_delete.iteritems():
for model_name, missing_experiment in models_to_delete.items():
ens.delete(ens.getChild(model_name))
print '\t %s \t %s' % (model_name, missing_experiment)
print('\t %s \t %s' % (model_name, missing_experiment))

print ' \n\n Realizations deleted (missing from one experiment): \n'
print '\t Model \t Realizations \n'
for key, value in realizations_to_delete.iteritems():
print '\t %s \t %s' % (key, ' '.join(value))
print(' \n\n Realizations deleted (missing from one experiment): \n')
print('\t Model \t Realizations \n')
for key, value in realizations_to_delete.items():
print('\t %s \t %s' % (key, ' '.join(value)))

ens.squeeze()
return ens
@@ -393,10 +393,10 @@ def ens_stats(ens, variable_name, output_prefix=''):
files_to_mean.append(outfile)

in_files = ' '.join(files_to_mean)
print files_to_mean[0]
print experiments[experimentname][0][1]
print(files_to_mean[0])
print(experiments[experimentname][0][1])
outfilename = os.path.split(files_to_mean[0])[1].replace(experiments[experimentname][0][1] + '_', "")
print outfilename
print(outfilename)
out_file = output_prefix + 'ENS-MEAN_' + outfilename

cdo_str = 'cdo ensmean ' + in_files + ' ' + out_file
@@ -574,7 +574,7 @@ def zonmean(ensemble, delete=True, output_prefix=''):
# if zonalmean is not succesful, delete the new file
if ex != 0:
try:
print 'deleting ' + outfile
print('deleting ' + outfile)
os.system('rm -f ' + outfile)
except:
pass
@@ -703,7 +703,7 @@ def remap(ensemble, remap='r360x180', method='remapdis', delete=True, output_pre
# if remapping is not successful delete the new file
if ex != 0:
try:
print 'deleting ' + outfile
print('deleting ' + outfile)
os.system('rm -f ' + outfile)
except:
pass
@@ -766,14 +766,14 @@ def time_slice(ensemble, start_date, end_date, delete=True, output_prefix=''):
end_yyyymm = end_date.replace('-', '')[0:6]

for f in ens.objects('ncfile'):
print f.name
print(f.name)
# don't proceed if the file already has the correct start date
if f.start_date != start_yyyymm or f.start_date != end_yyyymm:
var = f.parent
# check that the new date range is within the old date range
if f.start_date <= start_yyyymm and f.end_date >= end_yyyymm:
outfile = output_prefix + os.path.split(f.getNameWithoutDates())[1] + '_' + start_yyyymm + '-' + end_yyyymm + '.nc'
print 'time limiting...'
print('time limiting...')

cdostr = ('cdo -L seldate,' + date_range + ' -selvar,' +
var.name + ' ' + f.name + ' ' + outfile)
@@ -782,7 +782,7 @@ def time_slice(ensemble, start_date, end_date, delete=True, output_prefix=''):
# if the time silcing is unsuccesful, remove the new file
if ex != 0:
try:
print 'deleting ' + outfile
print('deleting ' + outfile)
os.system('rm -f ' + outfile)
except:
pass
@@ -792,7 +792,7 @@ def time_slice(ensemble, start_date, end_date, delete=True, output_prefix=''):
var.add(ncfile)

else:
print "%s %s is not in the date-range" % (var.parent.parent.parent.name, var.parent.name)
print("%s %s is not in the date-range" % (var.parent.parent.parent.name, var.parent.name))

var.delete(f)

@@ -938,7 +938,7 @@ def my_operator(ensemble, my_cdo_str="", output_prefix='processed_', delete=Fals
# if the operation is unsuccessful, delete the new file
if ex != 0:
try:
print 'Failed processing... deleting ' + outfile
print('Failed processing... deleting ' + outfile)
os.system('rm -f ' + outfile)
except:
pass
@@ -1016,7 +1016,7 @@ def trends(ensemble, start_date, end_date, delete=False):
# check the date range is within the file range
if f.start_date <= start_yyyymm and f.end_date >= end_yyyymm:
outfile = f.getNameWithoutDates() + '_' + start_yyyymm + '-' + end_yyyymm + '.nc'
print 'time limiting...'
print('time limiting...')
cdostr = ('cdo trend -seldate,' + date_range + ' ' +
'-selvar,' + var.name + ' ' + f.name + ' ' +
'intercept_' + outfile + ' ' +
@@ -1027,7 +1027,7 @@
# if the trands are not successful the new file is deleted
if ex != 0:
try:
print 'Failed processing... deleting ' + outfile
print('Failed processing... deleting ' + outfile)
os.system('rm -f ' + outfile)
os.system('rm -f intercept_' + outfile)
os.system('rm -f slope_' + outfile)
