This repository was archived by the owner on Dec 7, 2018. It is now read-only.

Rename the likelihood module to models (#35)
* rename likelihood module to models

* rename classes

* update gwin executable

* update samplers

* update io module

* update plot_movie

* update unit tests

* update init

* update __init__ again

* update option utils

* change GaussianNoise.name to gaussian_noise

* update test_option_utils

* rename test_likelihood to test_models

* fix whitespace

* rename variable_args to model_params

* update models

* update samplers

* update gwin executable

* update io

* update workflow utilities

* update option_utils

* update executables

* update tests

* update inference_test

* update docs

* rename static_args to static_params

* rename model_params to variable_params

* fix pep8 issues

* more pep8
Collin Capano authored Jul 13, 2018
1 parent 67ee007 commit 7454fad
Showing 27 changed files with 601 additions and 574 deletions.
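The renames are user-facing: configuration files now use a [model] section instead of [likelihood], and variable_args becomes variable_params throughout. A minimal sketch of the new-style layout follows. Only the [model] and [variable_params] section names and the gaussian_noise model name are confirmed by the diffs below; the [static_params] section is an assumption extrapolated from the static_args -> static_params rename, and all parameter names and option values are hypothetical.

# Hedged sketch of a post-rename configuration file, parsed with the
# standard library purely for illustration (gwin itself uses a
# WorkflowConfigParser, as in bin/gwin_plot_prior below).
import io
try:
    from configparser import ConfigParser   # Python 3
except ImportError:
    from ConfigParser import ConfigParser   # Python 2, gwin's era

NEW_STYLE = u"""
[model]
name = gaussian_noise

[variable_params]
mass1 =
mass2 =

[static_params]
approximant = IMRPhenomPv2
f_lower = 20
"""

cp = ConfigParser()
if hasattr(cp, "read_string"):             # Python 3
    cp.read_string(NEW_STYLE)
else:                                      # Python 2
    cp.readfp(io.StringIO(NEW_STYLE))
assert cp.get("model", "name") == "gaussian_noise"
assert sorted(cp.options("variable_params")) == ["mass1", "mass2"]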
44 changes: 21 additions & 23 deletions bin/gwin
@@ -59,7 +59,7 @@ parser.add_argument("--seed", type=int, default=0,
 parser.add_argument("--samples-file", default=None,
                     help="Use an iteration from an InferenceFile as the "
                          "initial proposal distribution. The same "
-                         "number of walkers and the same [variable_args] "
+                         "number of walkers and the same [variable_params] "
                          "section in the configuration file should be used. "
                          "The priors must allow encompass the initial "
                          "positions from the InferenceFile being read.")
@@ -165,9 +165,6 @@ else:
          "see --help")
 
 
-# get likelihood class
-likelihood_args = {}
-
 # set seed
 numpy.random.seed(opts.seed)
 logging.info("Using seed %i", opts.seed)
@@ -180,18 +177,18 @@ numpy.seterr(divide='ignore', invalid='ignore')
 ctx = scheme.from_cli(opts)
 fft.from_cli(opts)
 
-# Set up likelihood arguments
-likelihood_args = {}
+# Set up model arguments
+model_args = {}
 
 # get the data and psd
 strain_dict, stilde_dict, psd_dict = option_utils.data_from_cli(opts)
 low_frequency_cutoff_dict = option_utils.low_frequency_cutoff_from_cli(opts)
 if stilde_dict:
-    likelihood_args['data'] = stilde_dict
-    likelihood_args['f_lower'] = low_frequency_cutoff_dict.values()[0]
-    likelihood_args['delta_f'] = stilde_dict.values()[0].delta_f
-    likelihood_args['delta_t'] = strain_dict.values()[0].delta_t
-    likelihood_args['psds'] = psd_dict
+    model_args['data'] = stilde_dict
+    model_args['f_lower'] = low_frequency_cutoff_dict.values()[0]
+    model_args['delta_f'] = stilde_dict.values()[0].delta_f
+    model_args['delta_t'] = strain_dict.values()[0].delta_t
+    model_args['psds'] = psd_dict
 
 with ctx:
 
@@ -203,27 +200,26 @@ with ctx:
         logging.info("Initializing calibration model")
         recalib = {ifo : strain.read_model_from_config(cp, ifo) for
                    ifo in opts.instruments}
-        likelihood_args['recalib'] = recalib
+        model_args['recalib'] = recalib
 
     # get gates for templates
     gates = strain.gates_from_cli(opts)
     if gates:
-        likelihood_args['gates'] = gates
+        model_args['gates'] = gates
 
-    logging.info("Setting up likelihood")
+    logging.info("Setting up model")
 
-    # construct class that will return the natural logarithm of likelihood
-    likelihood = gwin.likelihood.read_from_config(cp, section="likelihood",
-                                                  **likelihood_args)
+    model = gwin.models.read_from_config(cp, section="model",
+                                         **model_args)
 
     burn_in_eval = burn_in.BurnIn(opts.burn_in_function,
                                   min_iterations=opts.min_burn_in)
 
-    # for likelihood evaluator
     logging.info("Setting up sampler")
 
     # create sampler that will run
-    sampler = option_utils.sampler_from_cli(opts, likelihood)
+    sampler = option_utils.sampler_from_cli(opts, model)
 
     # save information about this data and settings
     if not checkpoint_valid:
elif len(cp.get_subsections("initial")):
initial_dists = distributions.read_distributions_from_config(
cp, section="initial")
init_prior = distributions.JointDistribution(variable_args,
constraints = distributions.read_constraints_from_config(cp,
constraint_section="initial_constraint")
init_prior = distributions.JointDistribution(sampler.variable_params,
*initial_dists, **{"constraints" : constraints})
else:
init_prior = None
@@ -300,12 +298,12 @@ with ctx:
             sampler.write_burn_in_iterations(fp, burnidx, is_burned_in)
             # write the burn in results
             logging.info("Writing burn in samples to file")
-            sampler.write_results(fp, static_args=likelihood.static_args,
+            sampler.write_results(fp, static_params=model.static_params,
                                   ifos=opts.instruments)
         # write to backup file
         with InferenceFile(backup_file, "a") as fp:
             sampler.write_burn_in_iterations(fp, burnidx, is_burned_in)
-            sampler.write_results(fp, static_args=likelihood.static_args,
+            sampler.write_results(fp, static_params=model.static_params,
                                   ifos=opts.instruments)
 
 
@@ -356,7 +354,7 @@ with ctx:
         with InferenceFile(checkpoint_file, "a") as fp:
 
             logging.info("Writing results to file")
-            sampler.write_results(fp,static_args=likelihood.static_args,
+            sampler.write_results(fp, static_params=model.static_params,
                                   ifos=opts.instruments)
             logging.info("Updating burn in")
             burnidx, is_burned_in = burn_in_eval.update(sampler, fp)
@@ -373,7 +371,7 @@ with ctx:
         with InferenceFile(backup_file, "a") as fp:
 
             logging.info("Writing to backup file")
-            sampler.write_results(fp,static_args=likelihood.static_args,
+            sampler.write_results(fp, static_params=model.static_params,
                                   ifos=opts.instruments)
             sampler.write_burn_in_iterations(fp, burnidx, is_burned_in)
             if acls is not None:
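Stripped of diff markup, the bin/gwin changes amount to the following flow. A condensed sketch, assuming a gwin checkout at this commit (the opts and cp objects are the ones the executable builds from the command line and config files; psd, calibration, and gating handling are elided):

# Condensed sketch, not a drop-in replacement for bin/gwin.
from gwin import models, option_utils


def setup_model_and_sampler(opts, cp):
    # keyword arguments for the model, gathered from the command line
    model_args = {}
    strain_dict, stilde_dict, psd_dict = option_utils.data_from_cli(opts)
    if stilde_dict:
        model_args['data'] = stilde_dict
        model_args['psds'] = psd_dict
    # the config section is now [model] rather than [likelihood]
    model = models.read_from_config(cp, section="model", **model_args)
    # samplers now take a model in place of a likelihood evaluator
    sampler = option_utils.sampler_from_cli(opts, model)
    return model, sampler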
6 changes: 3 additions & 3 deletions bin/gwin_make_inj_workflow
@@ -198,7 +198,7 @@ for i in range(n_injections):
     # make node for writing HTML table of parameters
     post_table_files += gwin_workflow.make_inference_summary_table(
         workflow, inference_file, rdir["posteriors"],
-        variable_args=unique_plot_parameters,
+        variable_params=unique_plot_parameters,
         analysis_seg=workflow.analysis_time,
         tags=[str(i)])
 
@@ -256,12 +256,12 @@ if not opts.data_type == "analytical":
     inj_int_files = gwin_workflow.make_inference_inj_plots(
         workflow,
         inference_files, rdir.base,
-        cp.options("variable_args"),
+        cp.options("variable_params"),
         name="inference_intervals")
     inj_rec_files = gwin_workflow.make_inference_inj_plots(
         workflow,
         inference_files, rdir.base,
-        cp.options("variable_args"),
+        cp.options("variable_params"),
         name="inference_recovery")
     layout.two_column_layout(rdir.base,
                              [(a, b)
2 changes: 1 addition & 1 deletion bin/gwin_make_workflow
@@ -320,7 +320,7 @@ for num_event in range(num_events):
     base = "posteriors"
     post_table_files = inffu.make_inference_summary_table(
         workflow, inference_file, rdir[base],
-        variable_args=all_parameters,
+        variable_params=all_parameters,
         analysis_seg=analysis_time,
         tags=opts.tags + [str(num_event)])
     post_files = inffu.make_inference_posterior_plot(
14 changes: 7 additions & 7 deletions bin/gwin_plot_movie
@@ -192,7 +192,7 @@ else:
 
 # get samples from InferenceFile
 file_parameters, trans = transforms.get_common_cbc_transforms(
-    parameters, fp.variable_args)
+    parameters, fp.variable_params)
 samples = fp.read_samples(file_parameters, thin_start=thin_start,
                           thin_interval=thinint, thin_end=thin_end,
                           iteration=itermask, flatten=False)
@@ -204,14 +204,14 @@ if samples.ndim > 2:
 
 # Get z-values
 if opts.z_arg is not None:
-    logging.info("Getting likelihood stats")
-    likelihood_stats = fp.read_likelihood_stats(thin_start=thin_start,
+    logging.info("Getting model stats")
+    model_stats = fp.read_model_stats(thin_start=thin_start,
         thin_end=thin_end, thin_interval=thinint, iteration=itermask,
         flatten=False)
-    if likelihood_stats.ndim > 2:
-        _, ii, jj = likelihood_stats.shape
-        likelihood_stats = likelihood_stats.reshape((ii, jj))
-    zvals, zlbl = option_utils.get_zvalues(fp, opts.z_arg, likelihood_stats)
+    if model_stats.ndim > 2:
+        _, ii, jj = model_stats.shape
+        model_stats = model_stats.reshape((ii, jj))
+    zvals, zlbl = option_utils.get_zvalues(fp, opts.z_arg, model_stats)
     show_colorbar = True
     # Set common min and max for colorbar in all plots
     if opts.vmin is None:
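The reshape above collapses a model-stats array with a leading singleton dimension down to (walkers, iterations). A self-contained numpy toy of the same logic (the shape is hypothetical):

import numpy

# e.g. one stat, 3 walkers, 100 iterations; any leading dimension other
# than 1 would make this reshape fail, which the script implicitly assumes
model_stats = numpy.zeros((1, 3, 100))
if model_stats.ndim > 2:
    _, ii, jj = model_stats.shape
    model_stats = model_stats.reshape((ii, jj))
assert model_stats.shape == (3, 100)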
22 changes: 11 additions & 11 deletions bin/gwin_plot_prior
@@ -84,7 +84,7 @@ cp = WorkflowConfigParser(opts.config_files)
 # if only section then look for subsections
 # and add distributions to list
 logging.info("Constructing prior")
-variable_args = []
+variable_params = []
 dists = []
 for sec in opts.sections:
     section = sec.split("-")[0]
@@ -97,21 +97,21 @@ for sec in opts.sections:
         name = cp.get_opt_tag(section, "name", subsection)
         dist = distributions.distribs[name].from_config(
             cp, section, subsection)
-        variable_args += dist.params
+        variable_params += dist.params
         dists.append(dist)
-variable_args = sorted(variable_args)
-ndim = len(variable_args)
+variable_params = sorted(variable_params)
+ndim = len(variable_params)
 
 # construct class that will return draws from the prior
-prior = distributions.JointDistribution(variable_args, *dists)
+prior = distributions.JointDistribution(variable_params, *dists)
 
 # get all points in space to calculate PDF
 logging.info("Getting grid of points")
 vals = numpy.zeros(shape=(ndim,opts.bins))
 bounds = [{}] * ndim
 for dist in dists:
     for param in dist.params:
-        idx = variable_args.index(param)
+        idx = variable_params.index(param)
         step = float(dist.bounds[param][1]-dist.bounds[param][0]) / opts.bins
         vals[idx,:] = numpy.arange(dist.bounds[param][0],dist.bounds[param][1],step)
         bounds[idx] = dist.bounds
@@ -121,13 +121,13 @@ pts = cartesian(vals)
 logging.info("Calculating PDF")
 pdf = []
 for pt in pts:
-    pt_dict = dict([(param,pt[j]) for j,param in enumerate(variable_args)])
+    pt_dict = dict([(param,pt[j]) for j,param in enumerate(variable_params)])
     pdf.append(sum([dist.pdf(**pt_dict) for dist in dists]))
 pdf = numpy.array(pdf)
 
 # check if only one parameter to plot PDF
 logging.info("Plotting")
-if len(variable_args) == 1:
+if len(variable_params) == 1:
     x = vals[0,:]
     xmax = x.max()
     xmin = x.min()
@@ -136,12 +136,12 @@ if len(variable_args) == 1:
     plt.plot(x, pdf, "k", label="Prior")
     plt.xlim(xmin-pad, xmax+pad)
     plt.ylabel("Probability Density Function")
-    plt.xlabel(variable_args[0])
+    plt.xlabel(variable_params[0])
     plt.legend()
 
 # else make corner plot of all PDF
 else:
-    fig = corner.corner(pts, weights=pdf, labels=variable_args,
+    fig = corner.corner(pts, weights=pdf, labels=variable_params,
                         plot_contours=False, plot_datapoints=False)
 
 # remove the 1-D histograms
@@ -157,7 +157,7 @@ else:
 
 # save figure with meta-data
 caption_kwargs = {
-    "parameters" : ", ".join([param for param in variable_args])
+    "parameters" : ", ".join([param for param in variable_params])
 }
 caption = """This plot shows the probability density function (PDF) from the
 prior distributions."""
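gwin_plot_prior builds one grid row per parameter, takes the cartesian product of the rows, and evaluates the prior density at every grid point. A self-contained toy of that loop, assuming plain numpy and a flat two-parameter prior in place of gwin's distributions objects (parameter names and bounds are made up):

import numpy
from itertools import product

# hypothetical parameters and bounds standing in for dist.bounds
variable_params = sorted(["mass1", "mass2"])
bounds = {"mass1": (10.0, 50.0), "mass2": (10.0, 50.0)}
nbins = 50
ndim = len(variable_params)

# one row of grid values per parameter, as in the vals array above
vals = numpy.zeros((ndim, nbins))
for param, (lo, hi) in bounds.items():
    idx = variable_params.index(param)
    vals[idx, :] = numpy.arange(lo, hi, (hi - lo) / nbins)

# cartesian product of the rows, playing the role of cartesian(vals)
pts = numpy.array(list(product(*vals)))

# a flat joint pdf for illustration: constant 1/volume inside the bounds
volume = numpy.prod([hi - lo for lo, hi in bounds.values()])
pdf = numpy.full(len(pts), 1.0 / volume)
assert pts.shape == (nbins ** ndim, ndim)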
2 changes: 1 addition & 1 deletion bin/gwin_plot_samples
@@ -76,7 +76,7 @@ for i, arg in enumerate(parameters):
 
         # plot each walker as a different line on the subplot
         file_parameters, cs = transforms.get_common_cbc_transforms(
-            parameters, fp.variable_args)
+            parameters, fp.variable_params)
         y = fp.read_samples(file_parameters, walkers=j,
                             thin_start=opts.thin_start,
                             thin_interval=opts.thin_interval,
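The thin_start/thin_interval/thin_end options that appear throughout these plotting scripts amount to ordinary strided slicing of a walker's chain. A toy illustration with arbitrary numbers:

import numpy

chain = numpy.arange(1000)              # stand-in for one walker's samples
thin_start, thin_interval, thin_end = 200, 10, None

# drop the first 200 samples as burn-in, then keep every 10th sample
thinned = chain[thin_start:thin_end:thin_interval]
assert len(thinned) == 80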
[diff truncated: 21 of the 27 changed files are not shown]
