diff --git a/CIPP/Makefile b/CIPP/Makefile new file mode 100644 index 0000000..ceedf8b --- /dev/null +++ b/CIPP/Makefile @@ -0,0 +1,2 @@ +test: + python -m unittest diff --git a/CIPP/README.rst b/CIPP/README.rst index 0dd74fc..1292c99 100644 --- a/CIPP/README.rst +++ b/CIPP/README.rst @@ -6,12 +6,61 @@ CIPPtools --------------------------------- -These are a very loose collection of Python programs for helping someone +These are a very loose collection of Python 3 programs for helping someone serving as a HiRISE CIPP perform various tasks. + * Free software: Apache Software License 2.0 +Conversion +---------- +The ``ptf2csv.py`` and ``csv2ptf.py`` function mostly like the Perl +equivalents that have been in use by HiRISE for a decade. + +The difference is that ``ptf2csv.py`` has an option to add a +'HiReport Link' column to the end of the output CSV file. This +column contains a formula that when read in by most spreadsheet +programs will result in a clickable link in that cell of your +spreadsheet to allow easy checking of HiReport. + +And ``csv2ptf.py`` basically ignores any non-PTF columns in your +.csv file (like maybe that HiReport column that you had ``ptf2csv.py`` +put in, or any other columns that you might have added). + + +Working with the HiTList +------------------------ +``prioritize_by_orbit.py`` can be used on the HiTList you get from +your HiTS to clearly flag (by changing their existing priority from +positive to negative) which lower-priority observations in each +orbit are 'excluded' by the latitude-exclusion zone (defaults to +40 degrees in latitude on either side of an observation) of higher +priority observations. + +``priority_rewrite.py`` can be used near the end of your process when you +have a bunch of observations that all have the same priority that each need +a unique priority. This program takes that block of entries, and assigns unique +priorities based on latitude. 
+ +``orbit_count.py`` again, a program of the same name as a Perl program that we have. +The difference here is that this one is 'aware' of the possible negative priorities +given by ``prioritize_by_orbit.py``, prints out an observation count histogram (how many +orbits have 3 observations, etc.) Although it doesn't report data volume like +the Perl version does (but it could). + + +TOS +--- +What made it through TOS? Did my WTH make it through TOS? Did my WTH even make +it into the HiTLIST? Did you eliminate my WTH from the HiTList, you monster? + +Or replace 'WTH' with HiKERs or CaSSIS targets or really any list of suggestions +that you want to know are contained in a PTF. + +All of these questions can be answered with a PTF, text copied from the WTH list +wiki page, and ``tos_success.py``. + WARNING ------- -**You'll notice there aren't any tests, be warned!** +**There are some tests, but user beware.** diff --git a/CIPP/orbit_count.py b/CIPP/orbit_count.py new file mode 100755 index 0000000..b36c888 --- /dev/null +++ b/CIPP/orbit_count.py @@ -0,0 +1,136 @@ +#!/usr/bin/env python +"""Scans a PTF evaluating the state of priorities and performing counts.""" + +# Copyright 2019, Ross A. Beyer (rbeyer@seti.org) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import argparse +import logging +from collections import Counter +from itertools import groupby + +import priority_rewrite as pr + + +def main(): + parser = argparse.ArgumentParser(description=__doc__) + parser.add_argument('in_file', help="a .ptf or .csv file") + + args = parser.parse_args() + + logging.basicConfig(format='%(levelname)s: %(message)s') + + ptf_in = pr.get_input(args.in_file) + + print('\n'.join(format_report(orbit_count(ptf_in)))) + + +def orbit_count(records: list) -> str: + reports = list() + + sorted_by_o = sorted(records, + key=lambda x: int(x['Orbit Number'][:-1])) + for orbit, g in groupby(sorted_by_o, + key=lambda x: int(x['Orbit Number'][:-1])): + report_dict = {'orbit': orbit, 'pos': 0, 'neg': 0} + + for rec in g: + if int(rec['Request Priority']) > 0: + report_dict['pos'] += 1 + else: + report_dict['neg'] += 1 + + reports.append(report_dict) + + return reports + + +def format_report(records: list) -> list: + formatted_lines = list() + + # Prepare header and set widths + header = {'orbit': 'Orbit ({})'.format(len(records)), + 'pos': '# obs', 'neg': '# negative obs'} + o_width = len(header['orbit']) + p_width = len(header['pos']) + n_width = len(header['neg']) + rules = {'orbit': '-' * o_width, + 'pos': '-' * p_width, + 'neg': '-' * n_width} + + # Accumulate counts and fuss with removing zeros from output. 
+ pos_counts = Counter() + neg_count = 0 + str_reports = list() + for r in records: + str_d = {'orbit': str(r['orbit']), 'pos': '', 'neg': ''} + pos_counts.update((r['pos'],)) + neg_count += r['neg'] + for pn in ('pos', 'neg'): + if r[pn] == 0: + str_d[pn] = '' + else: + str_d[pn] = str(r[pn]) + str_reports.append(str_d) + + # The meat of formatting each line of the report: + lines_to_format = [header, rules] + str_reports + for d in lines_to_format: + o_str = '{orb:<{width}}'.format(orb=d['orbit'], width=o_width) + p_str = '{pos:^{width}}'.format(pos=d['pos'], width=p_width) + n_str = '{neg:<{width}}'.format(neg=d['neg'], width=n_width) + formatted_lines.append(f'{o_str} {p_str} {n_str}') + + # Summary line at the bottom: + count_summ = '{label:-^{width}}'.format(label='Counts', width=o_width) + count_summ += ' {} {}'.format(rules['pos'], rules['neg']) + formatted_lines.append(count_summ) + pos_count = sum(k * v for k, v in pos_counts.items()) + t_sum = '{sum:^#{o_width}}'.format(sum=pos_count + neg_count, + o_width=o_width) + t_pos = f'{pos_count:^#{p_width}}' + t_neg = f'{neg_count:<#{n_width}}' + formatted_lines.append('{}={}+ {}'.format(t_sum, t_pos, t_neg)) + formatted_lines.append('') + + num_of_obs = list() + num_of_orbs = list() + for k in sorted(list(pos_counts.keys()), reverse=True): + width = len(str(pos_counts[k])) + if width < 3: + width = 3 + num_of_obs.append(f'{k:^#{width}}') + num_of_orbs.append(f'{pos_counts[k]:^#{width}}') + + # formatted_lines.append('Orbit Count Histogram {}'.format(' '.join())) + formatted_lines.append('# of Observations: {}'.format(' '.join(num_of_obs))) + formatted_lines.append('# of Orbits : {}'.format(' '.join(num_of_orbs))) + formatted_lines.append('') + + # Set up the empty orbit report + empty_orbits = find_empty_orbits(records) + formatted_lines.append('Empty Orbits') + formatted_lines.append('------------') + + return formatted_lines + list(map(str, empty_orbits)) + + +def find_empty_orbits(records: list) -> list: + 
orbs = list(map(lambda x: x['orbit'], records)) + return sorted(set(range(orbs[0], orbs[-1])) - set(orbs)) + + +if __name__ == "__main__": + main() diff --git a/CIPP/prioritize_by_orbit.py b/CIPP/prioritize_by_orbit.py new file mode 100755 index 0000000..991abd9 --- /dev/null +++ b/CIPP/prioritize_by_orbit.py @@ -0,0 +1,167 @@ +#!/usr/bin/env python +"""Scans a PTF, grouping records by orbit, and attempts to deconflict + observations in that orbit. + + This is NOT a substitute for reviewing the PTF yourself. + + The algorithm will examine each orbit (based on the PTF Orbit Number + field, ignoring any Orbit Alternatives), and will guarantee that + at most, only the user-specified number of observations will remain + as positive priorities after it runs, all other records will be set + to negative priorities (allowing inspection afterwards). + + For each orbit, the algorithm will begin with the highest + prioritized observation, and deprioritize any observations within + the latitude exclusion range in either direction, then find the + next highest, and so on. When faced with observations that have + the same priority, it will give preference to the observation + that is closest to the equator. +""" + +# Copyright 2020, Ross A. Beyer (rbeyer@seti.org) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# TODO: write in a mechanism that allows the user to optimize for more +# observations per orbit, rather than strictly giving the highest priority +# plus the lowest latitude the top spot. Doing so may 'exclude' two other +# observations, and if one had the same priority, but didn't 'exclude' the +# other, you might be able to fit more observations on an orbit. +# +# TODO: may also want something other than abs(latitude) to be a driver, +# possibly time since coming out of eclipse or something. Lots of +# possibilities. + +import argparse +import logging + +from itertools import groupby + +import priority_rewrite as pr + + +def main(): + parser = argparse.ArgumentParser(description=__doc__) + parser.add_argument('-o', '--output', required=False) + parser.add_argument('--per_orbit', required=False, default=4, + help="The number of observations to keep in" + "an orbit.") + parser.add_argument('--latitude_exclude', required=False, default=40, + help="The amount of latitude on either side " + "(so it is a half-width) of an observation " + "that will be excluded for consideration.") + parser.add_argument('-n', '--dry_run', required=False, + action='store_true', help='Perform the rearranging ' + 'but do not write out results.') + parser.add_argument('in_file', help="a .ptf or .csv file") + + args = parser.parse_args() + + logging.basicConfig(format='%(levelname)s: %(message)s') + + ptf_in = pr.get_input(args.in_file) + + new_ptf_records = prioritize_by_orbit(ptf_in, + args.per_orbit, + args.latitude_exclude) + + # This sorting ignores the 'a' or 'd' markers on Orbits. 
+ new_ptf_records.sort(key=lambda x: int(x['Orbit Number'][:-1])) + + if args.dry_run: + pass + else: + out_str = pr.write_output(ptf_in, new_ptf_records, args.output) + if out_str: + print(out_str) + + +class intervals(object): + + def __init__(self, half_width: float): + self.intervals = list() # a list of two-tuples + self.half_width = float(half_width) + + def add(self, point: float): + p = float(point) + new_interval = ((p - self.half_width), (p + self.half_width)) + intervals = self.intervals + [new_interval] + + # This interval merging logic is from + # https://codereview.stackexchange.com/questions/69242/merging-overlapping-intervals + sorted_by_lower_bound = sorted(intervals, key=lambda x: x[0]) + merged = list() + + for higher in sorted_by_lower_bound: + if not merged: + merged.append(higher) + else: + lower = merged[-1] + # test for intersection between lower and higher: + # we know via sorting that lower[0] <= higher[0] + if higher[0] <= lower[1]: + upper_bound = max(lower[1], higher[1]) + # replace by merged interval: + merged[-1] = (lower[0], upper_bound) + else: + merged.append(higher) + + self.intervals = merged + return + + def is_in(self, point: float): + p = float(point) + for i in self.intervals: + if p >= i[0] and p <= i[1]: + return True + else: + return False + + +def prioritize_by_orbit(records: list, observations=4, + latitude_exclude=40) -> list: + '''Rewrites priorities by orbit.''' + + new_records = list() + sorted_by_o = sorted(records, + key=lambda x: int(x['Orbit Number'][:-1])) + for orbit, g in groupby(sorted_by_o, + key=lambda x: int(x['Orbit Number'][:-1])): + exclude = intervals(latitude_exclude) + obs_count = 0 + by_orbit = list(g) + by_orbit.sort(key=lambda x: int(x['Request Priority']), reverse=True) + for pri, pri_g in groupby(by_orbit, + key=lambda x: int(x['Request Priority'])): + recs = list(pri_g) + if len(recs) != 1: + # need to prioritize these by latitude + recs = pr.priority_rewrite(recs, keepzero=True) + + for r in 
sorted(recs, key=lambda x: int(x['Request Priority']), + reverse=True): + if(obs_count < observations and + not exclude.is_in(r['Latitude'])): + exclude.add(r['Latitude']) + obs_count += 1 + r['Request Priority'] = pri + else: + r['Request Priority'] = -1 * pri + + new_records.append(r) + + return new_records + + +if __name__ == "__main__": + main() diff --git a/CIPP/priority_rewrite.py b/CIPP/priority_rewrite.py index 18c4998..4b24093 100755 --- a/CIPP/priority_rewrite.py +++ b/CIPP/priority_rewrite.py @@ -51,7 +51,7 @@ def main(): new_ptf_records = priority_rewrite(ptf_in, args.reset, args.keepzero) - new_ptf_records.sort(key=lambda x: int(x['Request priority']), reverse=True) + new_ptf_records.sort(key=lambda x: int(x['Request Priority']), reverse=True) if args.dry_run: pass @@ -66,7 +66,7 @@ def priority_rewrite(records, reset_str=None, keepzero=False) -> list: ''' count = collections.Counter() for r in records: - count[int(r['Request priority'])] += 1 + count[int(r['Request Priority'])] += 1 reset = make_reset_dict(reset_str, count) @@ -90,7 +90,7 @@ def priority_rewrite(records, reset_str=None, keepzero=False) -> list: if is_enough_space(pri, next_pri, count[pri]): for j, r in enumerate(pri_records): d = collections.OrderedDict(r) - d['Request priority'] = pri + j + d['Request Priority'] = pri + j new_records.append(d) else: logging.warning('Starting at {} we need {} spots, but the next ' @@ -131,7 +131,7 @@ def get_records_for_this_priority(pri: int, records: list, reset: dict) -> list: for (k, v) in reset.items(): if pri == v: pri = k - out_records = list(filter(lambda x: int(x['Request priority']) == pri, + out_records = list(filter(lambda x: int(x['Request Priority']) == pri, records)) return out_records diff --git a/CIPP/ptf.py b/CIPP/ptf.py index 2cc94c1..2742123 100644 --- a/CIPP/ptf.py +++ b/CIPP/ptf.py @@ -143,9 +143,10 @@ def _dump_it(self, f: io.TextIOBase) -> io.TextIOBase: for line in c: f.write('# ' + line + '\n') f.write('#\n') - f.write('# ' + 
','.join(x.title() for x in fieldnames) + '\n') + # f.write('# ' + ','.join(x.title() for x in fieldnames) + '\n') f.write('# ' + ','.join(map(str, list(range(1, len(fieldnames) + 1)))) + '\n') - f.write('# ' + ','.join(x.capitalize() for x in fieldnames) + '\n') + # f.write('# ' + ','.join(x.capitalize() for x in fieldnames) + '\n') + f.write('# ' + ','.join(fieldnames) + '\n') writer = csv.DictWriter(f, fieldnames=fieldnames, extrasaction='ignore', restval='') diff --git a/CIPP/ptf2csv.py b/CIPP/ptf2csv.py index ed0bb30..f0774fe 100755 --- a/CIPP/ptf2csv.py +++ b/CIPP/ptf2csv.py @@ -26,6 +26,12 @@ def main(): parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('-o', '--output', required=False, default='.csv') + parser.add_argument('-l', '--links', required=False, action='store_true', + help='This flag will write a final element to each ' + 'output record with the HiReport URL as a ' + 'spreadsheet formula. When this .csv file is ' + 'imported by a spreadsheet program, there ' + 'should be clickable links in those cells.') parser.add_argument('ptf', metavar="some.ptf-file") args = parser.parse_args() @@ -38,6 +44,23 @@ def main(): ptf_in = ptf.load(args.ptf) + if args.links: + # Add HiReport Links + link_key = 'HiReport URLs' + ptf_in.fieldnames.append(link_key) + for i, rec in enumerate(ptf_in): + new_rec = rec + u = 'https://hireport.lpl.arizona.edu/hireport/suggestion/' + sugg = rec['Team Database ID'] + url = u + sugg + f = '=HYPERLINK("{}", "HiReport {}")'.format(url, sugg) + + # If the formula is too fancy, can always fall back to + # just the URL: + # new_rec[link_key] = url + new_rec[link_key] = f + ptf_in[i] = new_rec + with open(csv_path, 'w') as csvfile: writer = csv.DictWriter(csvfile, fieldnames=ptf_in.fieldnames) writer.writeheader() diff --git a/CIPP/test_priority_rewrite.py b/CIPP/test_priority_rewrite.py index 8bc628b..61fc802 100644 --- a/CIPP/test_priority_rewrite.py +++ b/CIPP/test_priority_rewrite.py @@ -22,7 +22,7 @@ 
import priority_rewrite as pr -hitlist = '''Instrument set,Predict time,Latitude,Longitude,Elevation,Observation type,Orbit number,Orbit alternatives,Observation duration,Setup duration,Orbital data table,Parameters table,Sequence filename,Downlink priority,Product ID,Spare 1,Spare 2,Spare 3,Spare 4,Comment,Request priority,Coordinated track history,Raw data volume,Team database ID,Request category,Compression,Pixel scale,Observation mode,Ancillary data,LsubS,Roll angle, +hitlist = '''Instrument Set,Predict Time,Latitude,Longitude,Elevation,Observation Type,Orbit Number,Orbit Alternatives,Observation Duration,Setup Duration,Orbital Data Table,Parameters Table,Sequence Filename,Downlink Priority,Product ID,Spare 1,Spare 2,Spare 3,Spare 4,Comment,Request Priority,Coordinated Track History,Raw Data Volume,Team Database ID,Request Category,Compression,Pixel Scale,Observation Mode,Ancillary Data,LsubS,Roll Angle, H,2019-109T03:06:32.050,18.169,336.125,-3.547,3,59659a,59659a 59725a,30,321,,,N/A,X,,,,1,,164320 Oxia Planum ExoMars Landing Site Future Exploration/Landing Sites PUB,16500,,0,164320,MH-MEP-REQ-CTX,enable,,,0,13,-4.222,HiReport H,2019-114T06:32:07.110,18.415,335.529,-3.663,4,59725a,59725a 59659a,30,321,,,N/A,X,,,,1,,169940 Oxia Planum ExoMars Landing Site Future Exploration/Landing Sites PUB,16300,,0,169940,MH-MEP-REQ-CTX,enable,,,0,15.5,-17.764,HiReport H,2019-112T07:39:55.229,-4.734,298.577,-4.189,3,59700a,59700a 59766a,30,321,,,N/A,X,,,,1,Seasonal: ESP_043876_1755:118228,118256 Monitor slopes in Juventae Chasma Mass Wasting Processes,15000,C,,118256,IO-REQ-CTX,enable,,,0,14.6,7.786,HiReport @@ -66,9 +66,9 @@ def test_sort_and_filter(self): pr.sort_and_filter(i, keepzero=True)) def test_get_records_for_this_priority(self): - r = [{'Request priority': 800, 'Name': 'One at priority 800'}, - {'Request priority': 800, 'Name': 'Two at Priority 800'}, - {'Request priority': 888, 'Name': 'Oddball'}] + r = [{'Request Priority': 800, 'Name': 'One at priority 800'}, + 
{'Request Priority': 800, 'Name': 'Two at Priority 800'}, + {'Request Priority': 888, 'Name': 'Oddball'}] self.assertEqual(2, len(pr.get_records_for_this_priority(800, r, {1: 2, 3: 4}))) self.assertEqual(1, len(pr.get_records_for_this_priority(800, @@ -80,13 +80,13 @@ def test_is_enough_space(self): self.assertFalse(pr.is_enough_space(10, 14, 5)) def test_priority_rewrite(self): - r = [{'Request priority': 800, 'Name': 'One at priority 800', + r = [{'Request Priority': 800, 'Name': 'One at priority 800', 'Latitude': 40}, - {'Request priority': 800, 'Name': 'Two at Priority 800', + {'Request Priority': 800, 'Name': 'Two at Priority 800', 'Latitude': 30}, - {'Request priority': 888, 'Name': 'Oddball', + {'Request Priority': 888, 'Name': 'Oddball', 'Latitude': 25}] - self.assertEqual(collections.OrderedDict([('Request priority', 700), + self.assertEqual(collections.OrderedDict([('Request Priority', 700), ('Name', 'One at priority 800'), ('Latitude', 40)]), pr.priority_rewrite(r, '800:700')[0]) diff --git a/CIPP/test_ptf.py b/CIPP/test_ptf.py index 815c885..3c15dcf 100644 --- a/CIPP/test_ptf.py +++ b/CIPP/test_ptf.py @@ -146,7 +146,7 @@ def test_parse(self): H,2019-104T01:33:31.327,19.476,312.078,-4.305,4,59594a,59594a,30.00,321.0,,,N/A,X,,,0.582,,ESP_055321_1995:154996 r=-1 i=53 Ls=176 Prev=13d Next=316d URGENT,154998 Summit pit of Santa Fe Crater Geologic Contacts/Stratigraphy - urgent for stereo completion,14005,,0.000,154998,IO-REQ-CTX,enable,,,2,10.6,-21.075''' (d, c, f, r) = ptf.parse(ptf_str_no_header) - self.assertEquals(ptf.fieldnames, f) + self.assertEqual(ptf.fieldnames, f) def test_load(self): loaded = ptf.loads(ptf_str)