
Commit

Merge branch 'main' into demo_lca
bernardinelli authored Nov 29, 2023
2 parents 12b91e7 + a74b45c commit 4e683c3
Showing 165 changed files with 3,678 additions and 2,358 deletions.
4 changes: 4 additions & 0 deletions .git_archival.txt
@@ -0,0 +1,4 @@
node: $Format:%H$
node-date: $Format:%cI$
describe-name: $Format:%(describe:tags=true,match=*[0-9]*)$
ref-names: $Format:%D$
1 change: 1 addition & 0 deletions .gitattributes
@@ -19,3 +19,4 @@
#
# png image files
# *.png filter=lfs diff=lfs merge=lfs -text
.git_archival.txt export-subst
2 changes: 1 addition & 1 deletion .github/workflows/smoke-test.yml
@@ -14,7 +14,7 @@ jobs:
strategy:
matrix:
os: ['ubuntu-latest', 'macos-latest']
python-version: ['3.8', '3.9', '3.10']
python-version: ['3.9', '3.10', '3.11']

runs-on: ${{ matrix.os }}
steps:
2 changes: 1 addition & 1 deletion .github/workflows/testing-and-coverage.yml
@@ -14,7 +14,7 @@ jobs:
strategy:
matrix:
os: ['ubuntu-latest', 'macos-latest']
python-version: ['3.8', '3.9', '3.10']
python-version: ['3.9', '3.10', '3.11']

runs-on: ${{ matrix.os }}
steps:
1 change: 1 addition & 0 deletions .gitignore
@@ -168,3 +168,4 @@ checker.txt
ephemeris_output.csv
data/out/

*~
31 changes: 16 additions & 15 deletions .pre-commit-config.yaml
@@ -1,3 +1,4 @@
fail_fast: true
repos:

# Compare the local template version to the latest remote template version
@@ -22,21 +23,6 @@ repos:
language: system
entry: jupyter nbconvert --clear-output

# Run unit tests, verify that they pass. Note that coverage is run against
# the ./src directory here because that is what will be committed. In the
# github workflow script, the coverage is run against the installed package
# and uploaded to Codecov by calling pytest like so:
# `python -m pytest --cov=<package_name> --cov-report=xml`
- repo: local
hooks:
- id: pytest-check
name: Run unit tests
description: Run unit tests with pytest.
entry: bash -c "if python -m pytest --co -qq; then python -m pytest --cov=./src --cov-report=html; fi"
language: system
pass_filenames: false
always_run: true

# prevents committing directly branches named 'main' and 'master'.
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.4.0
@@ -98,3 +84,18 @@ repos:
"-D", # Flag to override settings in conf.py
"exclude_patterns=notebooks/*,_build,**.ipynb_checkpoints", # Exclude our notebooks from pre-commit
]

# Run unit tests, verify that they pass. Note that coverage is run against
# the ./src directory here because that is what will be committed. In the
# github workflow script, the coverage is run against the installed package
# and uploaded to Codecov by calling pytest like so:
# `python -m pytest --cov=<package_name> --cov-report=xml`
- repo: local
hooks:
- id: pytest-check
name: Run unit tests
description: Run unit tests with pytest.
entry: bash -c "if python -m pytest --co -qq; then python -m pytest --cov=./src --cov-report=html; fi"
language: system
pass_filenames: false
always_run: true
28 changes: 28 additions & 0 deletions CITATION.cff
@@ -60,6 +60,34 @@ authors:
family-names: Young
affiliation: Queen's University Belfast
orcid: 'https://orcid.org/0000-0002-1229-2499'
- given-names: Pedro
family-names: Bernardinelli
affiliation: University of Washington
orcid: 'https://orcid.org/0000-0003-0743-9422'
- given-names: Colin
family-names: Chandler
affiliation: University of Washington
orcid: 'https://orcid.org/0000-0001-7335-1715'
- given-names: Matthew
family-names: Holman
affiliation: Center for Astrophysics - Harvard & Smithsonian
orcid: 'https://orcid.org/0000-0002-1139-4880'
- given-names: Jeremy
family-names: Kubica
affiliation: Carnegie Mellon University
orcid: ''
- given-names: Jake
family-names: Kurlander
affiliation: University of Washington
orcid: ''
- given-names: Drew
family-names: Oldag
affiliation: University of Washington
orcid: 'https://orcid.org/0000-0001-6984-8411'
- given-names: Max
family-names: West
affiliation: University of Washington
orcid: 'https://orcid.org/0009-0003-3171-3118'
repository-code: >-
https://github.com/dirac-institute/sorcha
abstract: >-
65 changes: 3 additions & 62 deletions README.md
@@ -9,77 +9,18 @@
[![Documentation Status](https://readthedocs.org/projects/sorcha/badge/?version=latest)](https://sorcha.readthedocs.io/en/latest/?badge=latest)
[![astropy](http://img.shields.io/badge/powered%20by-AstroPy-orange.svg?style=flat)](http://www.astropy.org/)

Documentation: https://sorcha.readthedocs.io
[![Template](https://img.shields.io/badge/Template-LINCC%20Frameworks%20Python%20Project%20Template-brightgreen)](https://lincc-ppt.readthedocs.io/en/latest/)

Other software utilities can be found in this github repository: https://github.com/dirac-institute/sorcha_communiity_utils/
Documentation: https://sorcha.readthedocs.io

Other software utilities can be found in this github repository: [https://github.com/dirac-institute/sorcha-addons](https://github.com/dirac-institute/sorcha-addons)
## developer best practices
* Data sets should be moved to the `data` folder and include a readme.txt or readme.md that explains where the data came from, along with a time stamp.
* Data sets that are used for unit testing should live in `tests/data`.
* All required input files for the main software or unit tests should have extensions that clearly describe the file format (e.g. .csv, .txt, .db, .fits)
* If you are working on addressing a specific issue ticket, assign yourself the ticket and set the status to "in progress"
* When making a pull request that closes an issue, cite the issue ticket in the pull request summary


## Making pip work
When making edits to the code, it's likely that the only thing you need to worry
about is making sure the imports are consistent. There are two places where this
is important: the sorcha/sorcha.py file and the sorcha/modules/__init__.py file.
If you want to add, remove, or change the name of a module, then both of these
files need to be updated to reflect that.

Within the sorcha/modules/__init__.py file it will look something like this:
```
from . import PPAddUncertainties
```
And in the sorcha.py file it will look something like this:
```
from sorcha.modules import PPAddUncertainties
```
When adding, removing or changing the name of any module, just make sure that
you've updated both of these files to reflect the changes.
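
For instance, registering a hypothetical new module (``PPMyNewModule`` is a made-up name for illustration) would touch both files like so:
```
# In sorcha/modules/__init__.py:
from . import PPAddUncertainties
from . import PPMyNewModule  # hypothetical new module, named for illustration

# In sorcha/sorcha.py:
from sorcha.modules import PPAddUncertainties
from sorcha.modules import PPMyNewModule
```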


If you want to make more major changes, e.g. adding another utility to the
command line, then there are two things to keep in mind. Firstly, the Python file
containing the utility has to be formatted in a specific way; secondly, the
pyproject.toml file has to be changed.

Examples of the file formatting can be seen in sorcha.py, makeConfigOIF and
makeConfigPP, so you can follow those. In short, you need to define a main()
function containing the parser arguments, e.g.

```
import argparse

def main():
    parser = argparse.ArgumentParser(description='creating config file(s) for Objects in Field')
    parser.add_argument("o", help="orbits file", type=str)
    # etc....
```

and then after, include:
```
if __name__ == '__main__':
    main()
```

pyproject.toml is the file which contains the information for the install.
It holds some general information, such as contact information for the author.
Generally this file won't have to be changed unless you want to:

- Update the author info etc.: this can just be changed manually.

- Add a prerequisite package, e.g. pandas: this can be added in the ``dependencies`` section just by adding the name of the package to the list. This means that the package will be installed alongside ``sorcha``. Specific versions can be added, e.g. 'pandas==1.3.5' (see the sketch after this list).

- Add a new command line utility: in the case of adding new utilities (e.g. the config file generators), this is a bit more complicated and relies on the file being in the format given above. If this is the case, then a new entry point can be added under ``[project.scripts]``:

```
[project.scripts]
makeConfigSorcha = "sorcha.utilities.makeConfigPP:main"
```
where makeConfigSorcha is the name of the command-line utility, and
``sorcha.utilities.makeConfigPP:main`` is the module path to its entry function.
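
For example, pinning pandas in the ``dependencies`` list mentioned above might look like the following minimal sketch (the surrounding values are illustrative, not copied from sorcha's actual pyproject.toml):
```
[project]
name = "sorcha"
# Illustrative dependency list; only the pandas pin mirrors the example above.
dependencies = [
    "numpy",
    "pandas==1.3.5",
]

[project.scripts]
makeConfigSorcha = "sorcha.utilities.makeConfigPP:main"
```
Everything in ``dependencies`` is resolved and installed by pip whenever ``sorcha`` itself is installed.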

## Collaboration
This effort is a collaboration between Queen's University Belfast, the University of Washington's DiRAC Institute,
the University of Illinois at Urbana-Champaign, the Center for Astrophysics | Harvard & Smithsonian, and LINCC Frameworks (through the LINCC Frameworks Incubator Program).
2 changes: 1 addition & 1 deletion benchmarks/cprofile_ReadPointingDatabase.py
@@ -29,7 +29,7 @@

filter_list = ["u", "g", "r", "i", "z", "y"]

sql_query = "SELECT observationId, observationStartMJD, filter, seeingFwhmGeom, seeingFwhmEff, fiveSigmaDepth, fieldRA, fieldDec, rotSkyPos FROM observations order by observationId"
sql_query = "SELECT observationId, observationStartMJD as observationStartMJD_TAI, filter, seeingFwhmGeom, seeingFwhmEff, fiveSigmaDepth, fieldRA, fieldDec, rotSkyPos FROM observations order by observationId"

cProfile.run("PPReadPointingDatabase('../demo/baseline_v2.0_1yr.db', filter_list, sql_query)", "restats")
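
For context, a minimal standalone sketch of the read being profiled here, using only the standard sqlite3 module and pandas (the actual internals of PPReadPointingDatabase may differ):
```
import sqlite3

import pandas as pd

# The query aliases observationStartMJD to observationStartMJD_TAI so that
# the TAI timescale of the column is explicit downstream.
sql_query = (
    "SELECT observationId, observationStartMJD as observationStartMJD_TAI, "
    "filter, seeingFwhmGeom, seeingFwhmEff, fiveSigmaDepth, fieldRA, "
    "fieldDec, rotSkyPos FROM observations order by observationId"
)

connection = sqlite3.connect("../demo/baseline_v2.0_1yr.db")
pointings = pd.read_sql_query(sql_query, connection)
connection.close()
print(pointings["observationStartMJD_TAI"].head())
```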

8 changes: 4 additions & 4 deletions demo/OIFconfig_benchmark.ini
@@ -17,7 +17,7 @@ field1 = 1
nfields = 216011
mpcobscode file = obslist.dat
telescope = I11
surveydbquery = SELECT observationId,observationStartMJD,fieldRA,fieldDEC,rotSkyPos FROM observations order by observationStartMJD
surveydbquery = SELECT observationId,observationStartMJD as observationStartMJD_TAI,fieldRA,fieldDEC,rotSkyPos FROM observations order by observationStartMJD_TAI

[CAMERA]
threshold = 5
@@ -135,8 +135,8 @@ SSP_maximum_time = 0.0625
# -u or --outfile Output file path.
# -t or --stem Output file stem.

# Output format. The 'separatelycsv' option saves output into separate CSVs for each object.
# Options: csv, separatelycsv, sqlite3, hdf5
# Output format.
# Options: csv, sqlite3, hdf5
output_format = csv

# Size of output. Controls which columns are in the output files.
@@ -156,7 +156,7 @@ magnitude_decimals = 3
# They may have unexpected results or break the code entirely.

# SQL query for extracting data from the pointing database.
pointing_sql_query = SELECT observationId, observationStartMJD, filter, seeingFwhmGeom, seeingFwhmEff, fiveSigmaDepth, fieldRA, fieldDec, rotSkyPos FROM observations order by observationId
pointing_sql_query = SELECT observationId, observationStartMJD as observationStartMJD_TAI, filter, seeingFwhmGeom, seeingFwhmEff, fiveSigmaDepth, fieldRA, fieldDec, rotSkyPos FROM observations order by observationId

# The unique name of the lightcurve model to use. Defined in the ``name_id`` method of the subclasses of AbstractLightCurve
lc_model = none
8 changes: 4 additions & 4 deletions demo/OIFconfig_lca.ini
@@ -17,7 +17,7 @@ field1 = 1
nfields = 216011
mpcobscode file = obslist.dat
telescope = I11
surveydbquery = SELECT observationId,observationStartMJD,fieldRA,fieldDEC,rotSkyPos FROM observations order by observationStartMJD
surveydbquery = SELECT observationId,observationStartMJD as observationStartMJD_TAI,fieldRA,fieldDEC,rotSkyPos FROM observations order by observationStartMJD_TAI

[CAMERA]
threshold = 5
@@ -131,8 +131,8 @@ SSP_maximum_time = 0.0625
# -u or --outfile Output file path.
# -t or --stem Output file stem.

# Output format. The 'separatelycsv' option saves output into separate CSVs for each object.
# Options: csv, separatelycsv, sqlite3, hdf5
# Output format.
# Options: csv, sqlite3, hdf5
output_format = csv

# Size of output. Controls which columns are in the output files.
@@ -152,7 +152,7 @@ magnitude_decimals = 3
# They may have unexpected results or break the code entirely.

# SQL query for extracting data from the pointing database.
pointing_sql_query = SELECT observationId, observationStartMJD, filter, seeingFwhmGeom, seeingFwhmEff, fiveSigmaDepth, fieldRA, fieldDec, rotSkyPos FROM observations order by observationId
pointing_sql_query = SELECT observationId, observationStartMJD as observationStartMJD_TAI, filter, seeingFwhmGeom, seeingFwhmEff, fiveSigmaDepth, fieldRA, fieldDec, rotSkyPos FROM observations order by observationId

# The unique name of the lightcurve model to use. Defined in the ``name_id`` method of the subclasses of AbstractLightCurve
lc_model = sinusoidal
2 changes: 1 addition & 1 deletion demo/OIFconfig_test.ini
@@ -17,7 +17,7 @@ field1 = 1
nfields = 216011
mpcobscode file = obslist.dat
telescope = I11
surveydbquery = SELECT observationId,observationStartMJD,fieldRA,fieldDEC,rotSkyPos FROM observations order by observationStartMJD
surveydbquery = SELECT observationId,observationStartMJD as observationStartMJD_TAI,fieldRA,fieldDEC,rotSkyPos FROM observations order by observationStartMJD_TAI

[CAMERA]
threshold = 5
40 changes: 18 additions & 22 deletions demo/PPConfig_test.ini
@@ -3,31 +3,25 @@

[INPUT]

# The simulation used for the ephemeris input.
# ar=ASSIST+REBOUND internal ephemeris generation
# external=providing an external input file from the command line
# Options: "ar", "external"
ephemerides_type = external

# Format for ephemeris simulation output file. If reading from an existing temporary ephemeris
# database, this will be ignored.
# Format for ephemeris simulation input file if a file is specified at the command line.
# If reading from an existing temporary ephemeris database, this will be ignored.
# Options: csv, whitespace, hdf5
eph_format = csv

# Sorcha chunk size: how many objects should be processed at once?
size_serial_chunk = 5000

# Format for orbit/colour/brightness/cometary data files.
# Format for the orbit, physical parameters, and complex physical parameters input files.
# Options: csv or whitespace
aux_format = whitespace


[ACTIVITY]

# Flag for cometary activity. If not none, a cometary parameters file must be specified at the command line.
# Value is expected to be the unique name of the activity model to use.
# The unique name is defined in the ``name_id`` method of the subclasses of AbstractCometaryActivity
comet_activity = none


[FILTERS]

# Filters of the observations you are interested in, comma-separated.
@@ -95,24 +89,26 @@ fading_function_peak_efficiency = 1.


[LINKINGFILTER]
# Remove this section if you do not wish to run the SSP linking filter.

# SSP detection efficiency. Which fraction of the observations of an object will
# the automated solar system processing pipeline successfully link? Float.
# SSP detection efficiency. Which fraction of the objects will
# the automated Rubin Solar System Processing (SSP) pipeline successfully link? Float.
SSP_detection_efficiency = 0.95

# Length of tracklets. How many observations of an object during one night are
# required to produce a valid tracklet?
SSP_number_observations = 2

# Minimum separation (in arcsec) between two observations of an object required for the linking software to distinguish them as separate and therefore as a valid tracklet.

# Minimum separation (in arcsec) between two observations of an object required
# for the linking software to distinguish them as separate and therefore as a valid tracklet.
SSP_separation_threshold = 0.5

# Maximum time separation (in days) between subsequent observations in a tracklet. Default is 0.0625 days (90mins).
# Maximum time separation (in days) between subsequent observations in a tracklet. Default is 0.0625 days (90mins).
# Maximum time separation (in days) between subsequent observations in a tracklet.
# Default is 0.0625 days (90mins).
SSP_maximum_time = 0.0625

# Number of tracklets for detection. How many tracklets are required to classify
# an object as detected? Must be an int.
# an object as detected?
SSP_number_tracklets = 3

# The number of tracklets defined above must occur in <= this number of days to
@@ -144,11 +140,11 @@ ar_healpix_order = 6

[OUTPUT]

# Output format. The 'separatelycsv' option saves output into separate CSVs for each object.
# Options: csv, separatelycsv, sqlite3, hdf5
# Output format of the output file[s]
# Options: csv, sqlite3, hdf5
output_format = csv

# Size of output. Controls which columns are in the output files.
# Size of output. Controls which columns are in the output file[s].
# Options are "basic" and "all", which returns all columns.
output_size = basic

@@ -165,7 +161,7 @@ magnitude_decimals = 3
# They may have unexpected results or break the code entirely.

# SQL query for extracting data from the pointing database.
pointing_sql_query = SELECT observationId, observationStartMJD, filter, seeingFwhmGeom, seeingFwhmEff, fiveSigmaDepth, fieldRA, fieldDec, rotSkyPos FROM observations order by observationId
pointing_sql_query = SELECT observationId, observationStartMJD as observationStartMJD_TAI, visitTime, filter, seeingFwhmGeom, seeingFwhmEff, fiveSigmaDepth, fieldRA, fieldDec, rotSkyPos FROM observations order by observationId

# SNR limit: drop observations equal to or below this SNR threshold.
# Cannot be used at the same time as the magnitude threshold. Must be a float.
Binary file modified demo/baseline_v2.0_1yr.db
Binary file not shown.
