From 58d63f9a91717c9582db9c4c00ae09d39247f19e Mon Sep 17 00:00:00 2001 From: Lynne Jones Date: Mon, 18 Dec 2023 18:24:21 -0800 Subject: [PATCH 01/26] move documentation into documenteer[guide] form --- doc/.gitignore | 4 - doc/Makefile | 38 ----- doc/README.md | 1 - doc/conf.py | 160 ------------------ doc/index.rst | 27 ---- doc/rs_data/index.rst | 18 --- doc/rs_moving_objects/index.rst | 25 --- doc/rs_phot_utils/index.rst | 19 --- doc/rs_satellite_constellations/index.rst | 17 -- doc/rs_scheduler/index.rst | 32 ---- doc/rs_scheduler/output_schema.rst | 94 ----------- doc/rs_selfcal/index.rst | 17 -- doc/rs_site_models/index.rst | 20 --- doc/rs_skybrightness/index.rst | 25 --- doc/rs_skybrightness_pre/index.rst | 20 --- doc/rs_utils/index.rst | 19 --- doc/rubin_sim/index.rst | 187 ---------------------- doc/toc.rst | 20 --- docs/.gitignore | 3 + docs/Makefile | 20 +++ docs/api.rst | 23 +++ docs/conf.py | 12 ++ docs/data-api.rst | 12 ++ docs/data-download.rst | 70 ++++++++ docs/documenteer.toml | 11 ++ docs/index.rst | 21 +++ docs/installation.rst | 85 ++++++++++ docs/introduction.rst | 37 +++++ docs/maf-api-batches.rst | 12 ++ docs/maf-api-db.rst | 12 ++ docs/maf-api-maf-contrib.rst | 12 ++ docs/maf-api-maps.rst | 12 ++ docs/maf-api-metricbundles.rst | 12 ++ docs/maf-api-metrics.rst | 12 ++ docs/maf-api-plots.rst | 12 ++ docs/maf-api-run-comparison.rst | 12 ++ docs/maf-api-slicers.rst | 12 ++ docs/maf-api-stackers.rst | 12 ++ docs/maf-api-utils.rst | 12 ++ docs/maf-api.rst | 21 +++ doc/rs_maf/index.rst => docs/maf.rst | 30 ++-- {doc => docs}/metric_list.py | 3 +- docs/moving-objects-api.rst | 12 ++ docs/moving-objects.rst | 22 +++ docs/phot-utils-api.rst | 12 ++ docs/phot-utils.rst | 11 ++ docs/satellite-constellations-api.rst | 12 ++ docs/satellite-constellations.rst | 13 ++ docs/selfcal-api.rst | 12 ++ docs/selfcal.rst | 15 ++ docs/skybrightness-api.rst | 12 ++ docs/skybrightness.rst | 23 +++ docs/user-guide.rst | 27 ++++ 53 files changed, 632 insertions(+), 762 deletions(-) delete mode 100644 doc/.gitignore delete mode 100644 doc/Makefile delete mode 100644 doc/README.md delete mode 100644 doc/conf.py delete mode 100644 doc/index.rst delete mode 100644 doc/rs_data/index.rst delete mode 100644 doc/rs_moving_objects/index.rst delete mode 100644 doc/rs_phot_utils/index.rst delete mode 100644 doc/rs_satellite_constellations/index.rst delete mode 100644 doc/rs_scheduler/index.rst delete mode 100644 doc/rs_scheduler/output_schema.rst delete mode 100644 doc/rs_selfcal/index.rst delete mode 100644 doc/rs_site_models/index.rst delete mode 100644 doc/rs_skybrightness/index.rst delete mode 100644 doc/rs_skybrightness_pre/index.rst delete mode 100644 doc/rs_utils/index.rst delete mode 100644 doc/rubin_sim/index.rst delete mode 100644 doc/toc.rst create mode 100644 docs/.gitignore create mode 100644 docs/Makefile create mode 100644 docs/api.rst create mode 100644 docs/conf.py create mode 100644 docs/data-api.rst create mode 100644 docs/data-download.rst create mode 100644 docs/documenteer.toml create mode 100644 docs/index.rst create mode 100644 docs/installation.rst create mode 100644 docs/introduction.rst create mode 100644 docs/maf-api-batches.rst create mode 100644 docs/maf-api-db.rst create mode 100644 docs/maf-api-maf-contrib.rst create mode 100644 docs/maf-api-maps.rst create mode 100644 docs/maf-api-metricbundles.rst create mode 100644 docs/maf-api-metrics.rst create mode 100644 docs/maf-api-plots.rst create mode 100644 docs/maf-api-run-comparison.rst create mode 100644 
docs/maf-api-slicers.rst create mode 100644 docs/maf-api-stackers.rst create mode 100644 docs/maf-api-utils.rst create mode 100644 docs/maf-api.rst rename doc/rs_maf/index.rst => docs/maf.rst (82%) rename {doc => docs}/metric_list.py (97%) create mode 100644 docs/moving-objects-api.rst create mode 100644 docs/moving-objects.rst create mode 100644 docs/phot-utils-api.rst create mode 100644 docs/phot-utils.rst create mode 100644 docs/satellite-constellations-api.rst create mode 100644 docs/satellite-constellations.rst create mode 100644 docs/selfcal-api.rst create mode 100644 docs/selfcal.rst create mode 100644 docs/skybrightness-api.rst create mode 100644 docs/skybrightness.rst create mode 100644 docs/user-guide.rst diff --git a/doc/.gitignore b/doc/.gitignore deleted file mode 100644 index f4ab7799e..000000000 --- a/doc/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -_build/* -source/* -api/ -rs_maf/metricList.rst diff --git a/doc/Makefile b/doc/Makefile deleted file mode 100644 index 96408cc42..000000000 --- a/doc/Makefile +++ /dev/null @@ -1,38 +0,0 @@ -# Makefile for Sphinx documentation - -# You can set these variables from the command line. -# SPHINXOPTS = -n -W -SPHINXOPTS = -n -SPHINXBUILD = sphinx-build -BUILDDIR = _build - -# User-friendly check for sphinx-build -ifeq ($(shell command -v $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) -$(error The '$(SPHINXBUILD)' command was not found. Run pip install -r requirements.txt) -endif - -# Internal variables. -ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(SPHINXOPTS) . - -.PHONY: help clean html linkcheck - -help: - @echo "Please use \`make ' where is one of" - @echo " html to make standalone HTML files" - @echo " linkcheck to check all external links for integrity" - @echo " clean to delete existing build products" - -clean: - rm -rf $(BUILDDIR)/* - rm -rf api/* - -html: - $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." - -linkcheck: - $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck - @echo - @echo "Link check complete; look for any errors in the above output " \ - "or in $(BUILDDIR)/linkcheck/output.txt." diff --git a/doc/README.md b/doc/README.md deleted file mode 100644 index ef49112ff..000000000 --- a/doc/README.md +++ /dev/null @@ -1 +0,0 @@ -Directory to hold documentation (in progress). \ No newline at end of file diff --git a/doc/conf.py b/doc/conf.py deleted file mode 100644 index 6e472b7b4..000000000 --- a/doc/conf.py +++ /dev/null @@ -1,160 +0,0 @@ -import os -import sys -from typing import List - -import lsst_sphinx_bootstrap_theme -from documenteer.sphinxconfig.utils import form_ltd_edition_name - -# Work around Sphinx bug related to large and highly-nested source files -sys.setrecursionlimit(2000) - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -# needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - "sphinx.ext.autodoc", - "sphinx.ext.doctest", - "sphinx.ext.intersphinx", - "sphinx.ext.todo", - "sphinx.ext.ifconfig", - "sphinx_click.ext", - "sphinxcontrib.autoprogram", - "sphinx-prompt", - "numpydoc", - # "sphinx_autodoc_typehints", - "sphinx_automodapi.automodapi", - "sphinx_automodapi.smart_resolver", - "documenteer.sphinxext", -] - -# The suffix(es) of source filenames. 
-# You can specify multiple suffix as a list of string: -# source_suffix = ['.rst', '.md'] -source_suffix = ".rst" - -# The master toctree document. -master_doc = "rubin_sim/index" - -# General information about the project. -project = "rubin_sim" -copyright = "2015-2022 " "Association of Universities for Research in Astronomy, Inc. (AURA)" -author = "LSST Survey Strategy Team" - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -if os.getenv("TRAVIS_BRANCH", default="main") == "main": - # Use the current release as the version tag if on master - version = "Current" - release = version -else: - # Use branch name as the version tag - version = form_ltd_edition_name(git_ref_name=os.getenv("TRAVIS_BRANCH", default="main")) - release = version - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# This is also used if you do content translation via gettext catalogs. -# Usually you set "language" from the command line for these cases. -language = "en" - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = ["_build", "README.rst"] - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = "sphinx" - -# The reST default role cross-links Python (used for this markup: `text`) -default_role = "py:obj" - -# Intersphinx - -intersphinx_mapping = { - "python": ("https://docs.python.org/3/", None), - "requests": ("https://2.python-requests.org/en/master/", None), - "developer": ("https://developer.lsst.io/", None), - "pybtex": ("https://docs.pybtex.org/", None), - "sphinx": ("https://www.sphinx-doc.org/en/master/", None), -} - -# Warnings to ignore -nitpick_ignore = [ - # This link to the base pybtex still never resolves because it is not - # in pybtex's intersphinx'd API reference. - ("py:class", "pybtex.style.formatting.plain.Style"), -] - -# -- Options for linkcheck builder ---------------------------------------- - -linkcheck_retries = 2 - -# Since Jira is currently down at this time -linkcheck_ignore = [r"^https://jira.lsstcorp.org/browse/"] - -linkcheck_timeout = 15 - -# -- Options for HTML output ---------------------------------------------- - -templates_path = [ - "_templates", - lsst_sphinx_bootstrap_theme.get_html_templates_path(), -] - -html_theme = "lsst_sphinx_bootstrap_theme" -html_theme_path = [lsst_sphinx_bootstrap_theme.get_html_theme_path()] - - -html_context = { - # Enable "Edit in GitHub" link - "display_github": True, - # https://{{ github_host|default("github.com") }}/{{ github_user }}/ - # {{ github_repo }}/blob/ - # {{ github_version }}{{ conf_py_path }}{{ pagename }}{{ suffix }} - "github_user": "lsst", - "github_repo": "rubin_sim", - "conf_py_path": "docs/", - # TRAVIS_BRANCH is available in CI, but master is a safe default - "github_version": os.getenv("TRAVIS_BRANCH", default="main") + "/", -} - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -html_theme_options = {"logotext": project} - -# The name for this set of Sphinx documents. 
If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -html_short_title = "rubin_sim" - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path: List[str] = [] - -# If true, links to the reST sources are added to the pages. -html_show_sourcelink = False - -# -- Options for the API reference ---------------------------------------- -numpydoc_show_class_members = False - -# -- ReStructuredText epilog for common links/substitutions --------------- -rst_epilog = """ -.. _conda-forge: https://conda-forge.org -.. _conda: https://conda.io/en/latest/index.html -""" diff --git a/doc/index.rst b/doc/index.rst deleted file mode 100644 index 52c2c0b13..000000000 --- a/doc/index.rst +++ /dev/null @@ -1,27 +0,0 @@ -.. py:currentmodule:: rubin_sim - -.. _rubin_sim: - -######### -rubin_sim -######### - -The rubin_sim module provides support for Rubin Observatory's LSST survey -scheduler, survey strategy analysis, and some basic simulation requirements. - -List of submodules: - -* :doc:`rubin_sim.utils ` provides some basic utilities we use throughout the rest of rubin_sim, but may be useful for other purposes. -* :doc:`rubin_sim.data ` provides a minimal tool to track the location of the associated downloaded data (see rs_download_data). -* :doc:`rubin_sim.phot_utils ` provides synthetic photometry and SNR tools. -* :doc:`rubin_sim.satellite_constellations ` tools for mega satellite constellations. -* :doc:`rubin_sim.selfcal ` generating stellar catalogs and running self-calibration. -* :doc:`rubin_sim.site_models ` provides tools to interact with our models for seeing and weather, as well as almanacs of sunrise/sunset. -* :doc:`rubin_sim.skybrightness ` can generate predicted skybrightness values for the Rubin site. -* :doc:`rubin_sim.skybrightness_pre ` provides pre-calculated versions of the skybrightness for the lifetime of LSST. -* :doc:`rubin_sim.scheduler ` provides the scheduling algorithms for Rubin and can generate (currently simulated) pointing histories. -* :doc:`rubin_sim.moving_objects ` can generate ephemerides for Solar System small bodies for a simulated LSST pointing history. -* :doc:`rubin_sim.maf ` provides metric analysis tools for simulated pointing histories. - - -:doc:`Table of Contents ` \ No newline at end of file diff --git a/doc/rs_data/index.rst b/doc/rs_data/index.rst deleted file mode 100644 index e4bcc3a06..000000000 --- a/doc/rs_data/index.rst +++ /dev/null @@ -1,18 +0,0 @@ -.. py:currentmodule:: rubin_sim.data - -.. _rubin_sim.data: - -============== -rubin_sim data -============== - -The rubin_sim.data module provides a utility to interpret the location of the RUBIN_SIM_DATA_DIR -and provide information on the current simulated baseline pointing history. - - -Python API -========== - -* :ref:`rubin_sim.data api` - -* :ref:`search` \ No newline at end of file diff --git a/doc/rs_moving_objects/index.rst b/doc/rs_moving_objects/index.rst deleted file mode 100644 index 9d4d4d3a1..000000000 --- a/doc/rs_moving_objects/index.rst +++ /dev/null @@ -1,25 +0,0 @@ -.. py:currentmodule:: rubin_sim.moving_objects - -.. 
_rubin_sim.moving_objects: - -======================= -rubin_sim MovingObjects -======================= - -The movingObjects module provides tools to generate simulated ephemerides of a population of -small bodies throughout an LSST pointing history. These ephemerides are typically used for further -analysis in :doc:`MAF < ../rs_maf/index>` to evaluate the effect of survey strategy on various populations -of Solar System objects. - -There are several populations available in the 'orbits' directory of the -rubin_sim data. Many of these populations were contributed or enhanced by the LSST Solar System Science Collaboration (SSSC). -Further documentation on these orbital populations is available in the -`LSST-SSSC "SSSC_test_populations" `_ repo. - - -Python API -========== - -* :ref:`rubin_sim.moving_objects api` - -* :ref:`search` diff --git a/doc/rs_phot_utils/index.rst b/doc/rs_phot_utils/index.rst deleted file mode 100644 index b3d389bd9..000000000 --- a/doc/rs_phot_utils/index.rst +++ /dev/null @@ -1,19 +0,0 @@ -.. py:currentmodule:: rubin_sim.phot_utils - -.. _rubin_sim.phot_utils: - -======================= -rubin_sim photUtils -======================= - -The rubin_sim.photUtils module provides synthetic photometry and SNR calculation -methods for Rubin Observatory. There are expected throughput curves available in the -'throughputs' directory of the rubin_sim data download. - - -Python API -========== - -* :ref:`rubin_sim.phot_utils api` - -* :ref:`search` \ No newline at end of file diff --git a/doc/rs_satellite_constellations/index.rst b/doc/rs_satellite_constellations/index.rst deleted file mode 100644 index 539e4cf8d..000000000 --- a/doc/rs_satellite_constellations/index.rst +++ /dev/null @@ -1,17 +0,0 @@ -.. py:currentmodule:: rubin_sim.satellite_constellations - -.. _rubin_sim.satellite_constellations: - -================================== -rubin_sim satellite constellations -================================== - -Tools for creating and propigating satellite mega constellations and how they could streak Rubin images. - - -Python API -========== - -* :ref:`rubin_sim.satellite_constellations api` - -* :ref:`search` \ No newline at end of file diff --git a/doc/rs_scheduler/index.rst b/doc/rs_scheduler/index.rst deleted file mode 100644 index 73607e360..000000000 --- a/doc/rs_scheduler/index.rst +++ /dev/null @@ -1,32 +0,0 @@ -.. py:currentmodule:: rubin_sim.scheduler - -.. _rubin_sim.scheduler: - -=================== -rubin_sim Scheduler -=================== - -The feature based scheduler is available through rubin_sim, in the -`rubin_sim.scheduler` module. - -Scripts to use the scheduler code to create a simulated survey can be -found in the github repo at -`lsst-sims/sims_featureScheduler_runs2.0 -`_. -To be able to simulate a full 10 years of observations, additional skybrightness -data files must be downloaded (about 250GB), which can be done using the -script `rubin_sim/bin/rs_download_sky `_. -A typical simulation will take on the order of 6 hours to complete. - -The scheduler outputs a sqlite database containing the pointing history of -the telescope, along with information about the conditions of each -observation (visit). -Description of the :doc:`schema for the output database `. 
- - -Python API -========== - -* :ref:`rubin_sim.scheduler api` - -* :ref:`search` \ No newline at end of file diff --git a/doc/rs_scheduler/output_schema.rst b/doc/rs_scheduler/output_schema.rst deleted file mode 100644 index b71359850..000000000 --- a/doc/rs_scheduler/output_schema.rst +++ /dev/null @@ -1,94 +0,0 @@ -======================= -Scheduler Output Schema -======================= - -The scheduler simulations output a sqlite database with the following columns in the -`observations` table. - -All values are for the center of the field of view (e.g., airmass, altitude, etc) - -+-----------------------+-------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| Column Name | Units | Description | -+=======================+===================+===================================================================================================================================================================================================+ -| airmass | unitless | airmass of the observation (center of the field) | -+-----------------------+-------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| altitude | degrees | Altitude of the observation | -+-----------------------+-------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| azimuth | degrees | Azimuth of the observation | -+-----------------------+-------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| block\_id | int | Identification ID of the block (used by some survey objects) | -+-----------------------+-------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| cloud | fraction | what fraction of the sky is cloudy | -+-----------------------+-------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| fieldDec | degrees | Declination of the observation | -+-----------------------+-------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| fieldId | int | deprecated, should all be 0 or -1. 
| -+-----------------------+-------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| fieldRA | degrees | Right Ascension of the observation | -+-----------------------+-------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| filter | string | The filter that was loaded for the observation, one of u,g,r,i,z,y | -+-----------------------+-------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| fiveSigmaDepth | magnitudes | The magnitude of an isolated point source detected at the 5-sigma level | -+-----------------------+-------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| flush\_by\_mjd | days | The modified Julian date the observation would have been flushed from the queue at | -+-----------------------+-------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| moonAlt | degrees | Altitude of the moon | -+-----------------------+-------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| moonAz | degrees | Azimuth of the moon | -+-----------------------+-------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| moonDec | degrees | Declination of the moon | -+-----------------------+-------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| moonDistance | degrees | Angular distance between the observation and the moon | -+-----------------------+-------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| moonPhase | percent (0-100) | The phase of the moon (probably the same as illumination fraction) | -+-----------------------+-------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| moonRA | degrees | Right Ascension of the moon | -+-----------------------+-------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| night | days | The night of the survey (starting at 1) | 
-+-----------------------+-------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| note | string | Note added by the scheduler, often which survey object generated the observation | -+-----------------------+-------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| numExposures | int | Number of exposures in the visit | -+-----------------------+-------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| observationId | int | Unique observation ID | -+-----------------------+-------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| observationStartLST | degrees | the Local Sidereal Time at the start of the observation | -+-----------------------+-------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| observationStartMJD | days | Modified Julian Date at the start of the observation | -+-----------------------+-------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| paraAngle | degrees | Paralactic angle of the observation | -+-----------------------+-------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| proposalId | int | deprecated | -+-----------------------+-------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| rotSkyPos | degrees | The orientation of the sky in the focal plane measured as the angle between North on the sky and the "up" direction in the focal plane. | -+-----------------------+-------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| rotTelPos | degrees | The physical angle of the rotator with respect to the mount. rotSkyPos = rotTelPos - ParallacticAngle | -+-----------------------+-------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| seeingFwhm500 | arcseconds | The full-width at half maximum of the PSF at 500 nm. 
(XXX-unsure if this is at zenith or at the pointing) | -+-----------------------+-------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| seeingFwhmEff | arcseconds | "Effective" full-width at half maximum, typically ~15% larger than FWHMgeom. Use FWHMeff to calculate SNR for point sources, using FWHMeff as the FWHM of a single Gaussian describing the PSF. | -+-----------------------+-------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| seeingFwhmGeom | arcseconds | "Geometrical" full-width at half maximum. The actual width at half the maximum brightness. Use FWHMgeom to represent the FWHM of a double-Gaussian representing the physical width of a PSF. | -+-----------------------+-------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| skyBrightness | mag arcsec^-2 | the brightness of the sky (in the given filter) for the observation | -+-----------------------+-------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| slewDistance | degrees | distance the telescope slewed to the observation | -+-----------------------+-------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| slewTime | seconds | The time it took to slew to the observation. Includes any filter change time and any readout time. | -+-----------------------+-------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| solarElong | degrees | Solar elongation or the angular distance between the field center and the sun (0 - 180 deg). 
| -+-----------------------+-------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| sunAlt | degrees | Altitude of the sun | -+-----------------------+-------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| sunAz | degrees | Azimuth of the sun | -+-----------------------+-------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| sunDec | degrees | declination of the sun | -+-----------------------+-------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| sunRA | degrees | RA of the sun | -+-----------------------+-------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| visitExposureTime | seconds | Total exposure time of the visit | -+-----------------------+-------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| visitTime | seconds | Total time of the visit (could be larger than ``visitExposureTime`` if the visit had multiple exposures with readout between them) | -+-----------------------+-------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| cummTelAz | degrees | The cumulative azimuth rotation of the telescope mount, should be +/- 270 degrees due to cable wrap limits. | -+-----------------------+-------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ diff --git a/doc/rs_selfcal/index.rst b/doc/rs_selfcal/index.rst deleted file mode 100644 index 0d84f0b1b..000000000 --- a/doc/rs_selfcal/index.rst +++ /dev/null @@ -1,17 +0,0 @@ -.. py:currentmodule:: rubin_sim.selfcal - -.. _rubin_sim.selfcal: - -================= -rubin_sim selfcal -================= - -Tools for simulating self-calibration. - - -Python API -========== - -* :ref:`rubin_sim.selfcal api` - -* :ref:`search` diff --git a/doc/rs_site_models/index.rst b/doc/rs_site_models/index.rst deleted file mode 100644 index 805829b13..000000000 --- a/doc/rs_site_models/index.rst +++ /dev/null @@ -1,20 +0,0 @@ -.. py:currentmodule:: rubin_sim.site_models - -.. _rubin_sim.site_models: - -===================== -rubin_sim site_models -===================== - -The rubin_sim.site_models module provides tools to read the simulated site weather data (seeing and cloud) -and provide those to the scheduler in a manner useful for the simulation (including adding airmass -and wavelength effects to the seeing, etc.). 
It also generates and reads back almanac information, such -as the sunrise and sunset times and planetary positions over the expected lifetime of LSST. - - -Python API -========== - -* :ref:`rubin_sim.site_models api` - -* :ref:`search` \ No newline at end of file diff --git a/doc/rs_skybrightness/index.rst b/doc/rs_skybrightness/index.rst deleted file mode 100644 index b87ad99f5..000000000 --- a/doc/rs_skybrightness/index.rst +++ /dev/null @@ -1,25 +0,0 @@ -.. py:currentmodule:: rubin_sim.skybrightness - -.. _rubin_sim.skybrightness: - -======================= -rubin_sim skybrightness -======================= - -The rubin_sim.skybrightness module generates predicted skybrightness values (in either magnitudes per -square arcsecond for any LSST bandpass or as a SED over the relevant wavelengths). It uses the -ESO skybrightness model components (includes upper and lower atmosphere emission lines, airglow continuum, -zodiacal light and scattered lunar light) and has additional twilight components. The model predictions -have been tested against skybrightness measurements at the LSST site. - -More details about the rubin_sim version of the model and its validation for Rubin is available in -`An optical to IR sky brightness model for the LSST by Yoachim et. al. -`_. - - -Python API -========== - -* :ref:`rubin_sim.skybrightness api` - -* :ref:`search` \ No newline at end of file diff --git a/doc/rs_skybrightness_pre/index.rst b/doc/rs_skybrightness_pre/index.rst deleted file mode 100644 index 80d11ecad..000000000 --- a/doc/rs_skybrightness_pre/index.rst +++ /dev/null @@ -1,20 +0,0 @@ -.. py:currentmodule:: rubin_sim.skybrightness_pre - -.. _rubin_sim.skybrightness_pre: - -=========================== -rubin_sim skybrightness_pre -=========================== - -The rubin_sim.skybrightness_pre module generates and accesses pre-calculated skybrightness values -over the sky in each bandpass during the expected on-sky period for LSST. The values calculated -by the rubin_sim.skybrightness module are packaged into data files which are then read and passed to -the scheduler by rubin_sim.skybrightness_pre. - - -Python API -========== - -* :ref:`rubin_sim.skybrightness_pre api` - -* :ref:`search` \ No newline at end of file diff --git a/doc/rs_utils/index.rst b/doc/rs_utils/index.rst deleted file mode 100644 index 44a670219..000000000 --- a/doc/rs_utils/index.rst +++ /dev/null @@ -1,19 +0,0 @@ -.. py:currentmodule:: rubin_sim.utils - -.. _rubin_sim.utils: - -=============== -rubin_sim utils -=============== - -The rubin_sim.utils module provides basic utilities that are useful throughout rubin_sim. -These include utilities for tree generation in healpy and other healpix manipulation, -coordinate transform utilities, as well as other useful tools. - - -Python API -========== - -* :ref:`rubin_sim.utils api` - -* :ref:`search` diff --git a/doc/rubin_sim/index.rst b/doc/rubin_sim/index.rst deleted file mode 100644 index ead4cc2be..000000000 --- a/doc/rubin_sim/index.rst +++ /dev/null @@ -1,187 +0,0 @@ -.. py:currentmodule:: rubin_sim - -.. _rubin_sim.index: - -######### -rubin_sim -######### - -:doc:`Overview <../index>` - -:doc:`Table of contents <../toc>` - - -========== -Python API -========== - -.. _rubin_sim.utils api: - -:doc:`rubin_sim.utils api <../rs_utils/index>` -============================================== -.. automodapi:: rubin_sim.utils - :no-main-docstr: - :no-inheritance-diagram: - -.. 
_rubin_sim.data api: - -:doc:`rubin_sim.data api <../rs_data/index>` -============================================ -.. automodapi:: rubin_sim.data - :no-main-docstr: - :no-inheritance-diagram: - - -.. _rubin_sim.phot_utils api: - -:doc:`rubin_sim.phot_utils api <../rs_phot_utils/index>` -====================================================== -.. automodapi:: rubin_sim.phot_utils - :no-main-docstr: - :no-inheritance-diagram: - - -.. _rubin_sim.satellite_constellations api: - -:doc:`rubin_sim.satellite_constellations api <../rs_satellite_constellations/index>` -====================================================== -.. automodapi:: rubin_sim.satellite_constellations - :no-main-docstr: - :no-inheritance-diagram: - -.. _rubin_sim.selfcal api: - -:doc:`rubin_sim.selfcal api <../rs_selfcal/index>` -====================================================== -.. automodapi:: rubin_sim.selfcal - :no-main-docstr: - :no-inheritance-diagram: - -.. _rubin_sim.site_models api: - -:doc:`rubin_sim.site_models api <../rs_site_models/index>` -========================================================== -.. automodapi:: rubin_sim.site_models - :no-main-docstr: - :no-inheritance-diagram: - -.. _rubin_sim.skybrightness api: - -:doc:`rubin_sim.skybrightness api <../rs_skybrightness/index>` -============================================================== -.. automodapi:: rubin_sim.skybrightness - :no-main-docstr: - :no-inheritance-diagram: - -.. _rubin_sim.skybrightness_pre api: - -:doc:`rubin_sim.skybrightness_pre api <../rs_skybrightness_pre/index>` -====================================================================== -.. automodapi:: rubin_sim.skybrightness_pre - :no-main-docstr: - :no-inheritance-diagram: - -.. _rubin_sim.scheduler api: - -:doc:`rubin_sim.scheduler api <../rs_scheduler/index>` -====================================================== -.. automodapi:: rubin_sim.scheduler - :no-main-docstr: - :no-inheritance-diagram: - -.. automodapi:: rubin_sim.scheduler.schedulers - :no-main-docstr: - :no-inheritance-diagram: - -.. automodapi:: rubin_sim.scheduler.surveys - :no-main-docstr: - :no-inheritance-diagram: - -.. automodapi:: rubin_sim.scheduler.basis_functions - :no-main-docstr: - :no-inheritance-diagram: - -.. automodapi:: rubin_sim.scheduler.features - :no-main-docstr: - :no-inheritance-diagram: - -.. automodapi:: rubin_sim.scheduler.detailers - :no-main-docstr: - :no-inheritance-diagram: - -.. automodapi:: rubin_sim.scheduler.model_observatory - :no-main-docstr: - :no-inheritance-diagram: - -.. automodapi:: rubin_sim.scheduler.utils - :no-main-docstr: - :no-inheritance-diagram: - - -.. _rubin_sim.movingObjects api: - -:doc:`rubin_sim.movingObjects api <../rs_moving_objects/index>` -============================================================== -.. automodapi:: rubin_sim.moving_objects - :no-main-docstr: - :no-inheritance-diagram: - - -.. _rubin_sim.maf api: - -:doc:`rubin_sim.maf api <../rs_maf/index>` -========================================== -.. automodapi:: rubin_sim.maf.batches - :no-main-docstr: - :no-inheritance-diagram: - -.. automodapi:: rubin_sim.maf.metrics - :no-main-docstr: - :no-inheritance-diagram: - -.. automodapi:: rubin_sim.maf.maf_contrib - :no-main-docstr: - :no-inheritance-diagram: - -.. automodapi:: rubin_sim.maf.slicers - :no-main-docstr: - :no-inheritance-diagram: - -.. automodapi:: rubin_sim.maf.db - :no-main-docstr: - :no-inheritance-diagram: - -.. automodapi:: rubin_sim.maf.maps - :no-main-docstr: - :no-inheritance-diagram: - -.. 
automodapi:: rubin_sim.maf.stackers
-    :no-main-docstr:
-    :no-inheritance-diagram:
-
-.. automodapi:: rubin_sim.maf.plots
-    :no-main-docstr:
-    :no-inheritance-diagram:
-
-.. automodapi:: rubin_sim.maf.metric_bundles
-    :no-main-docstr:
-    :no-inheritance-diagram:
-
-.. automodapi:: rubin_sim.maf.run_comparison
-    :no-main-docstr:
-    :no-inheritance-diagram:
-
-.. automodapi:: rubin_sim.maf.utils
-    :no-main-docstr:
-    :no-inheritance-diagram:
-
-.. automodapi:: rubin_sim.maf.web
-    :no-main-docstr:
-    :no-inheritance-diagram:
-
-
-======
-Search
-======
-
-* :ref:`search`
diff --git a/doc/toc.rst b/doc/toc.rst
deleted file mode 100644
index 930efcc1a..000000000
--- a/doc/toc.rst
+++ /dev/null
@@ -1,20 +0,0 @@
-=================
-Table of contents
-=================
-
-.. toctree::
-    :glob:
-    :maxdepth: 1
-
-    rs_utils/*
-    rs_data/*
-    rs_phot_utils/*
-    rs_satellite_constellations/*
-    rs_selfcal/*
-    rs_site_models/*
-    rs_skybrightness/*
-    rs_skybrightness_pre/*
-    rs_scheduler/*
-    rs_moving_objects/*
-    rs_maf/*
-    rubin_sim/*
\ No newline at end of file
diff --git a/docs/.gitignore b/docs/.gitignore
new file mode 100644
index 000000000..c6a1e0663
--- /dev/null
+++ b/docs/.gitignore
@@ -0,0 +1,3 @@
+_build/*
+source/*
+maf-metric-list.rst
diff --git a/docs/Makefile b/docs/Makefile
new file mode 100644
index 000000000..d4bb2cbb9
--- /dev/null
+++ b/docs/Makefile
@@ -0,0 +1,20 @@
+# Minimal makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line, and also
+# from the environment for the first two.
+SPHINXOPTS    ?=
+SPHINXBUILD   ?= sphinx-build
+SOURCEDIR     = .
+BUILDDIR      = _build
+
+# Put it first so that "make" without argument is like "make help".
+help:
+	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
+
+.PHONY: help Makefile
+
+# Catch-all target: route all unknown targets to Sphinx using the new
+# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
+%: Makefile
+	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
diff --git a/docs/api.rst b/docs/api.rst
new file mode 100644
index 000000000..228b21ef6
--- /dev/null
+++ b/docs/api.rst
@@ -0,0 +1,23 @@
+.. py:currentmodule:: rubin_sim
+
+.. _api:
+
+API
+===
+
+.. toctree::
+    :maxdepth: 2
+
+    Data <data-api>
+
+    Metrics Analysis Framework (MAF) <maf-api>
+
+    Moving Objects <moving-objects-api>
+
+    Phot Utils <phot-utils-api>
+
+    Satellite Constellations <satellite-constellations-api>
+
+    Self Calibration <selfcal-api>
+
+    Skybrightness <skybrightness-api>
\ No newline at end of file
diff --git a/docs/conf.py b/docs/conf.py
new file mode 100644
index 000000000..3cd4584d2
--- /dev/null
+++ b/docs/conf.py
@@ -0,0 +1,12 @@
+# Configuration file for the Sphinx documentation builder.
+#
+# For the full list of built-in configuration values, see the documentation:
+# https://www.sphinx-doc.org/en/master/usage/configuration.html
+
+from documenteer.conf.guide import *  # noqa: F403, import *
+
+linkcheck_retries = 2
+
+import os, sys; sys.path.insert(0, os.path.dirname(__file__))  # conf.py is not a package, so make metric_list importable
+from metric_list import make_metric_list
+make_metric_list("maf-metric-list.rst")
diff --git a/docs/data-api.rst b/docs/data-api.rst
new file mode 100644
index 000000000..f941dd02f
--- /dev/null
+++ b/docs/data-api.rst
@@ -0,0 +1,12 @@
+.. py:currentmodule:: rubin_sim.data
+
+.. _data-api:
+
+========
+Data API
+========
+
+.. automodule:: rubin_sim.data
+    :imported-members:
+    :members:
+    :show-inheritance:
\ No newline at end of file
diff --git a/docs/data-download.rst b/docs/data-download.rst
new file mode 100644
index 000000000..0e941efa8
--- /dev/null
+++ b/docs/data-download.rst
@@ -0,0 +1,70 @@
+.. py:currentmodule:: rubin_sim.data
+
+.. 
_data-download:
+
+=============
+Data Download
+=============
+
+The ``rubin_sim.data`` module provides a script to download the data
+required to run various modules in ``rubin_sim``, as well as to check the
+expected versions of the data. It also provides utilities to interpret
+the location of $RUBIN_SIM_DATA_DIR on disk and to return the
+path to the current baseline simulation output (one of the datafiles
+downloaded by this module).
+
+With the split of ``rubin_sim`` into ``rubin_sim`` + ``rubin_scheduler``, the
+required data download utilities now live in the
+`rubin_scheduler.data <https://rubin-scheduler.lsst.io>`_
+package. ``rubin_scheduler`` is a necessary dependency of ``rubin_sim`` and
+should have
+been installed during the :ref:`installation <installation>` process.
+The ``rubin_sim.data`` module simply provides additional information on the
+data files necessary for ``rubin_sim``, then calls the scripts from
+``rubin_scheduler.data`` to execute the download.
+
+
+Downloading Necessary Data
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Please see the information in the
+`rubin_scheduler "Downloading Necessary Data" documentation <https://rubin-scheduler.lsst.io>`_
+for more details on setting up $RUBIN_SIM_DATA_DIR (which is
+shared between ``rubin_scheduler``, ``rubin_sim`` and ``schedview``).
+
+Using either the default path to $RUBIN_SIM_DATA_DIR, or after setting it
+explicitly, first download the necessary data for ``rubin_scheduler`` and
+then add the (larger) data set for ``rubin_sim``:
+
+.. code-block:: bash
+
+    scheduler_download_data
+    rs_download_data
+
+This creates a series of directories at $RUBIN_SIM_DATA_DIR (in addition
+to the directories originating from `rubin_scheduler <https://rubin-scheduler.lsst.io>`_):
+
+* maf (containing data used for various metrics)
+* maps (containing various stellar density and 2-D and 3-D dust maps)
+* movingObjects (containing asteroid SEDs)
+* orbits (containing orbits for Solar System population samples)
+* orbits_precompute (precomputed daily orbits for the samples above)
+* sim_baseline (containing the current baseline simulation output)
+* skybrightness (containing information needed for the skybrightness module)
+* throughputs (current baseline throughput information)
+* test (containing data for unit tests)
+
+
+Note that data will only be downloaded for directories which do
+not already exist, regardless of whether the version on disk is up to date.
+To force an update to the version which matches the ``rubin_scheduler`` version:
+
+.. code-block:: bash
+
+    rs_download_data --update
+
+This can also be applied only to certain directories, using the
+``--dirs`` flag. Some of the above directories are considerably larger
+than others -- ``maps``, ``maf`` and ``orbits_precompute`` are the
+largest, and if they are not needed they can be skipped by limiting the
+download with ``--dirs``.
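+
+Locating Data from Python
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+As a quick sketch of how the downloaded data are located at runtime
+(this assumes the ``get_data_dir`` and ``get_baseline`` helpers that
+``rubin_sim.data`` re-exports from ``rubin_scheduler.data``; see the
+:ref:`Data API <data-api>` page for the current interface):
+
+.. code-block:: python
+
+    from rubin_sim.data import get_baseline, get_data_dir
+
+    # Resolved location of $RUBIN_SIM_DATA_DIR on this machine
+    print(get_data_dir())
+
+    # Path to the current baseline opsim database in sim_baseline/
+    print(get_baseline())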
diff --git a/docs/documenteer.toml b/docs/documenteer.toml
new file mode 100644
index 000000000..33c3de478
--- /dev/null
+++ b/docs/documenteer.toml
@@ -0,0 +1,11 @@
+[project]
+title = "rubin_sim"
+copyright = "2023 Association of Universities for Research in Astronomy, Inc. (AURA)"
+base_url = "https://rubin-sim.lsst.io"
+github_url = "https://github.com/lsst/rubin_sim"
+
+[project.python]
+package = "rubin_sim"
+
+[sphinx]
+extensions = ["sphinx.ext.viewcode"]
diff --git a/docs/index.rst b/docs/index.rst
new file mode 100644
index 000000000..4621faa3d
--- /dev/null
+++ b/docs/index.rst
@@ -0,0 +1,21 @@
+.. py:currentmodule:: rubin_sim
+
+.. _rubin-sim-index:
+
+#########
+rubin_sim
+#########
+
+Helping simulate and evaluate the performance of
+the Vera C. Rubin Observatory's Legacy Survey of Space and Time (the LSST),
+by prototyping analyses, simulating photometry, and providing a
+framework for analyzing survey strategy progress and outcomes.
+
+
+.. toctree::
+    :maxdepth: 2
+
+    Introduction <introduction>
+    Installation <installation>
+    User Guide <user-guide>
+    API <api>
\ No newline at end of file
diff --git a/docs/installation.rst b/docs/installation.rst
new file mode 100644
index 000000000..3f8d15a95
--- /dev/null
+++ b/docs/installation.rst
@@ -0,0 +1,85 @@
+.. py:currentmodule:: rubin_sim
+
+.. _installation:
+
+############
+Installation
+############
+
+Quick Installation
+------------------
+
+Installation from PyPI:
+
+::
+
+    pip install rubin-sim
+
+Note: a pip installation of rubin-sim will lack the JPL data (DE405, etc.)
+needed to actually run ``pyoorb`` (used in ``rubin_sim.moving_objects``),
+as this data is not currently available from PyPI.
+Please see the `oorb installation instructions <https://github.com/oorb/oorb>`_ for more information.
+
+Or, from conda-forge:
+
+::
+
+    conda install -c conda-forge rubin-sim
+
+Please note that after either installation,
+additional data must be downloaded to use the software,
+following the instructions at
+:ref:`Data Download <data-download>`.
+
+For Developer Use
+-----------------
+
+First, clone the `rubin_sim <https://github.com/lsst/rubin_sim>`_ repository:
+
+::
+
+    git clone git@github.com:lsst/rubin_sim.git
+    cd rubin_sim
+
+
+Create a conda environment for it:
+
+::
+
+    conda create --channel conda-forge --name rubin_sim --file requirements.txt python=3.11
+
+
+If you want to run tests (please do), install the test requirements as well:
+
+::
+
+    conda activate rubin_sim
+    conda install -c conda-forge --file=test-requirements.txt
+
+
+Install the ``rubin_sim`` package into this environment (from the rubin_sim directory):
+
+::
+
+    pip install -e . --no-deps
+
+Please note that after installation,
+additional data must be downloaded to use the software,
+following the instructions at
+:ref:`Data Download <data-download>`.
+
+
+Building Documentation
+----------------------
+
+An online copy of the documentation is available at https://rubin-sim.lsst.io,
+but a local copy can be built as follows:
+
+::
+
+    pip install "documenteer[guide]"
+    cd docs
+    package-docs build
+
+
+The root of the local documentation will then be ``docs/_build/html/index.html``.
+
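+Verifying the Installation
+--------------------------
+
+A quick import check confirms the package is available after either
+installation route (a minimal sketch; this assumes ``rubin_sim`` exposes
+a ``__version__`` attribute, as set by its build tooling):
+
+::
+
+    import rubin_sim
+
+    print(rubin_sim.__version__)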
diff --git a/docs/introduction.rst b/docs/introduction.rst
new file mode 100644
index 000000000..9f1832fc5
--- /dev/null
+++ b/docs/introduction.rst
@@ -0,0 +1,37 @@
+.. py:currentmodule:: rubin_sim
+
+.. _introduction:
+
+############
+Introduction
+############
+
+The `Legacy Survey of Space and Time <https://www.lsst.org>`_ (LSST)
+is anticipated to encompass around 2 million observations spanning a decade,
+averaging 800 visits per night. The ``rubin_sim`` package was built to help
+understand the predicted performance of the LSST.
+
+The :ref:`Phot Utils <phot-utils>` module provides synthetic photometry
+using provided throughput curves based on current predicted performance.
+
+The :ref:`Skybrightness <skybrightness>` module incorporates the ESO
+sky model, modified to match measured sky conditions at the LSST site,
+including the addition of a twilight skybrightness model. This is used
+to generate the pre-calculated skybrightness data used in
+`rubin_scheduler`_.
+
+The :ref:`Moving Objects <moving-objects>` module provides a way to create
+synthetic observations of moving objects, based on how they would appear in
+pointing databases ("opsims") created by
+`rubin_scheduler <https://rubin-scheduler.lsst.io>`_.
+
+One of the major goals for ``rubin_sim`` is to enable efficient and
+scientifically varied evaluation of the LSST survey strategy and progress,
+by providing a framework to run metrics in a
+standardized way on opsim outputs.
+The :ref:`Metrics Analysis Framework <maf>` module provides these tools.
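+
+As a flavor of what this looks like in practice, the sketch below counts
+the visits at each point on the sky for a hypothetical opsim output named
+``baseline.db`` (the class names follow the current MAF interface; see the
+:ref:`MAF <maf>` pages and tutorials for fully worked examples):
+
+.. code-block:: python
+
+    import rubin_sim.maf as maf
+
+    # Count visits at each point of a HEALPix tessellation of the sky
+    metric = maf.CountMetric(col="observationStartMJD")
+    slicer = maf.HealpixSlicer(nside=64)
+    bundle = maf.MetricBundle(metric, slicer, constraint="")
+
+    # Run the bundle against the opsim database and write results to disk
+    group = maf.MetricBundleGroup({"count": bundle}, "baseline.db", out_dir="maf_out")
+    group.run_all()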
+
+.. toctree::
+    :maxdepth: 2
+
+    User Guide <user-guide>
diff --git a/docs/maf-api-batches.rst b/docs/maf-api-batches.rst
new file mode 100644
index 000000000..a25a9f58a
--- /dev/null
+++ b/docs/maf-api-batches.rst
@@ -0,0 +1,12 @@
+.. py:currentmodule:: rubin_sim.maf
+
+.. _maf-api-batches:
+
+=======
+Batches
+=======
+
+.. automodule:: rubin_sim.maf.batches
+    :imported-members:
+    :members:
+    :show-inheritance:
diff --git a/docs/maf-api-db.rst b/docs/maf-api-db.rst
new file mode 100644
index 000000000..84f7cdb76
--- /dev/null
+++ b/docs/maf-api-db.rst
@@ -0,0 +1,12 @@
+.. py:currentmodule:: rubin_sim.maf
+
+.. _maf-api-db:
+
+==============
+Databases (db)
+==============
+
+.. automodule:: rubin_sim.maf.db
+    :imported-members:
+    :members:
+    :show-inheritance:
diff --git a/docs/maf-api-maf-contrib.rst b/docs/maf-api-maf-contrib.rst
new file mode 100644
index 000000000..451736024
--- /dev/null
+++ b/docs/maf-api-maf-contrib.rst
@@ -0,0 +1,12 @@
+.. py:currentmodule:: rubin_sim.maf
+
+.. _maf-api-maf-contrib:
+
+===========
+Maf Contrib
+===========
+
+.. automodule:: rubin_sim.maf.maf_contrib
+    :imported-members:
+    :members:
+    :show-inheritance:
diff --git a/docs/maf-api-maps.rst b/docs/maf-api-maps.rst
new file mode 100644
index 000000000..7f4653ed4
--- /dev/null
+++ b/docs/maf-api-maps.rst
@@ -0,0 +1,12 @@
+.. py:currentmodule:: rubin_sim.maf
+
+.. _maf-api-maps:
+
+====
+Maps
+====
+
+.. automodule:: rubin_sim.maf.maps
+    :imported-members:
+    :members:
+    :show-inheritance:
diff --git a/docs/maf-api-metricbundles.rst b/docs/maf-api-metricbundles.rst
new file mode 100644
index 000000000..d251e51cd
--- /dev/null
+++ b/docs/maf-api-metricbundles.rst
@@ -0,0 +1,12 @@
+.. py:currentmodule:: rubin_sim.maf
+
+.. _maf-api-metricbundles:
+
+==============
+Metric Bundles
+==============
+
+.. automodule:: rubin_sim.maf.metric_bundles
+    :imported-members:
+    :members:
+    :show-inheritance:
diff --git a/docs/maf-api-metrics.rst b/docs/maf-api-metrics.rst
new file mode 100644
index 000000000..0d3ab6c70
--- /dev/null
+++ b/docs/maf-api-metrics.rst
@@ -0,0 +1,12 @@
+.. py:currentmodule:: rubin_sim.maf
+
+.. _maf-api-metrics:
+
+=======
+Metrics
+=======
+
+.. automodule:: rubin_sim.maf.metrics
+    :imported-members:
+    :members:
+    :show-inheritance:
diff --git a/docs/maf-api-plots.rst b/docs/maf-api-plots.rst
new file mode 100644
index 000000000..bea4bdffe
--- /dev/null
+++ b/docs/maf-api-plots.rst
@@ -0,0 +1,12 @@
+.. py:currentmodule:: rubin_sim.maf
+
+.. _maf-api-plots:
+
+=====
+Plots
+=====
+
+.. automodule:: rubin_sim.maf.plots
+    :imported-members:
+    :members:
+    :show-inheritance:
diff --git a/docs/maf-api-run-comparison.rst b/docs/maf-api-run-comparison.rst
new file mode 100644
index 000000000..815f64347
--- /dev/null
+++ b/docs/maf-api-run-comparison.rst
@@ -0,0 +1,12 @@
+.. py:currentmodule:: rubin_sim.maf
+
+.. _maf-api-run-comparison:
+
+==============
+Run Comparison
+==============
+
+.. automodule:: rubin_sim.maf.run_comparison
+    :imported-members:
+    :members:
+    :show-inheritance:
diff --git a/docs/maf-api-slicers.rst b/docs/maf-api-slicers.rst
new file mode 100644
index 000000000..21b57dfb7
--- /dev/null
+++ b/docs/maf-api-slicers.rst
@@ -0,0 +1,12 @@
+.. py:currentmodule:: rubin_sim.maf
+
+.. _maf-api-slicers:
+
+=======
+Slicers
+=======
+
+.. automodule:: rubin_sim.maf.slicers
+    :imported-members:
+    :members:
+    :show-inheritance:
diff --git a/docs/maf-api-stackers.rst b/docs/maf-api-stackers.rst
new file mode 100644
index 000000000..99cc8e435
--- /dev/null
+++ b/docs/maf-api-stackers.rst
@@ -0,0 +1,12 @@
+.. py:currentmodule:: rubin_sim.maf
+
+.. _maf-api-stackers:
+
+========
+Stackers
+========
+
+.. automodule:: rubin_sim.maf.stackers
+    :imported-members:
+    :members:
+    :show-inheritance:
diff --git a/docs/maf-api-utils.rst b/docs/maf-api-utils.rst
new file mode 100644
index 000000000..7989795bd
--- /dev/null
+++ b/docs/maf-api-utils.rst
@@ -0,0 +1,12 @@
+.. py:currentmodule:: rubin_sim.maf
+
+.. _maf-api-utils:
+
+=====
+Utils
+=====
+
+.. automodule:: rubin_sim.maf.utils
+    :imported-members:
+    :members:
+    :show-inheritance:
diff --git a/docs/maf-api.rst b/docs/maf-api.rst
new file mode 100644
index 000000000..df8a3aa5d
--- /dev/null
+++ b/docs/maf-api.rst
@@ -0,0 +1,21 @@
+.. py:currentmodule:: rubin_sim.maf
+
+.. _maf-api:
+
+=======
+MAF API
+=======
+
+
+.. toctree::
+
+    Batches <maf-api-batches>
+    Databases (db) <maf-api-db>
+    Maps <maf-api-maps>
+    Maf Contrib <maf-api-maf-contrib>
+    Metrics <maf-api-metrics>
+    MetricBundles <maf-api-metricbundles>
+    Plots <maf-api-plots>
+    Run Comparison <maf-api-run-comparison>
+    Slicers <maf-api-slicers>
+    Stackers <maf-api-stackers>
+    Utils <maf-api-utils>
\ No newline at end of file
diff --git a/doc/rs_maf/index.rst b/docs/maf.rst
similarity index 82%
rename from doc/rs_maf/index.rst
rename to docs/maf.rst
index 153862100..bdcd7fa58 100644
--- a/doc/rs_maf/index.rst
+++ b/docs/maf.rst
@@ -1,18 +1,17 @@
-.. py:currentmodule:: rubin_sim.maf
+.. py:currentmodule:: rubin_sim
 
-.. _rubin_sim.maf:
+.. _maf:
 
-=============
-rubin_sim MAF
-=============
+################################
+Metrics Analysis Framework (MAF)
+################################
 
+The ``rubin_sim.maf`` Metrics Analysis Framework (MAF) module is
+intended to make it easier to write code to analyze our simulated LSST
+pointing histories (often called "opsim outputs").
 
-The Metrics Analysis Framework (MAF) is a software package
-intended to make it easier to write code to analyze telescope
-pointing histories; in particular, the primary use case is to
-analyze simulated surveys generated by the LSST scheduler code.
-
-As an example: suppose one wanted to evaluate LSST's performance regarding
+As an example: suppose one wanted to evaluate the LSST's performance in
+regard to
 characterizing a particular kind of periodic variable
 in a given simulated survey.
 As such, you might have
 particular requirements on the parameters of the observations at
 each point in RA/Dec space -- MAF will handle getting
@@ -26,7 +25,7 @@ analysis -- statistical summaries over the observed sky, such as the mean,
 median, RMS, minimum or maximum values). In this case, you would only have
 to write a small piece of code (a *metric*) that makes the actual evaluation,
 assuming you have the relevant observations for a single piece of sky.
-A simple list of all :doc:`available metrics ` is available.
+A simple list of all :doc:`available metrics <maf-metric-list>` is available.
 A concrete example of this can be found in the KNeMetric - which is
 illustrated in depth in a notebook in the github repo at
 `lsst/rubin_sim_notebooks
@@ -56,9 +55,6 @@ For more examples of using MAF, please see our `tutorials`_.
_tutorials: https://github.com/lsst/rubin_sim_notebooks/tree/main/maf
-Python API
-==========
-
-* :ref:`rubin_sim.maf api`
+.. toctree::
-* :ref:`search`
+    List of Available Metrics
\ No newline at end of file
diff --git a/doc/metric_list.py b/docs/metric_list.py
similarity index 97%
rename from doc/metric_list.py
rename to docs/metric_list.py
index 066c4b895..1161aabfe 100644
--- a/doc/metric_list.py
+++ b/docs/metric_list.py
@@ -42,6 +42,5 @@ def make_metric_list(outfile):
             print(f"- {link} \n \t {simpledoc}", file=f)
         print(" ", file=f)
-
 if __name__ == "__main__":
-    make_metric_list("rs_maf/metric_list.rst")
+    make_metric_list("maf-metric-list.rst")
\ No newline at end of file
diff --git a/docs/moving-objects-api.rst b/docs/moving-objects-api.rst
new file mode 100644
index 000000000..8ded7c8f4
--- /dev/null
+++ b/docs/moving-objects-api.rst
@@ -0,0 +1,12 @@
+.. py:currentmodule:: rubin_sim.moving_objects
+
+.. _moving-objects-api:
+
+==================
+Moving Objects API
+==================
+
+.. automodule:: rubin_sim.moving_objects
+    :imported-members:
+    :members:
+    :show-inheritance:
\ No newline at end of file
diff --git a/docs/moving-objects.rst b/docs/moving-objects.rst
new file mode 100644
index 000000000..bbcb989d1
--- /dev/null
+++ b/docs/moving-objects.rst
@@ -0,0 +1,22 @@
+.. py:currentmodule:: rubin_sim
+
+.. _moving-objects:
+
+
+##############
+Moving Objects
+##############
+
+The ``rubin_sim.moving_objects`` module provides tools to
+generate simulated ephemerides of a population of
+small bodies throughout an LSST pointing history.
+These ephemerides are typically used for further
+analysis in :ref:`MAF ` to evaluate the effect of
+survey strategy on various populations
+of Solar System objects.
+
+There are several populations available in the "orbits" directory of
+$RUBIN_SIM_DATA_DIR. Many of these populations were contributed or
+enhanced by the LSST Solar System Science Collaboration (SSSC).
+Further documentation on these orbital populations is available in the
+`LSST-SSSC "SSSC_test_populations" `_ repo.
\ No newline at end of file
diff --git a/docs/phot-utils-api.rst b/docs/phot-utils-api.rst
new file mode 100644
index 000000000..23e33ce07
--- /dev/null
+++ b/docs/phot-utils-api.rst
@@ -0,0 +1,12 @@
+.. py:currentmodule:: rubin_sim.phot_utils
+
+.. _phot-utils-api:
+
+==============
+Phot Utils API
+==============
+
+.. automodule:: rubin_sim.phot_utils
+    :imported-members:
+    :members:
+    :show-inheritance:
\ No newline at end of file
diff --git a/docs/phot-utils.rst b/docs/phot-utils.rst
new file mode 100644
index 000000000..0d635db9b
--- /dev/null
+++ b/docs/phot-utils.rst
@@ -0,0 +1,11 @@
+.. py:currentmodule:: rubin_sim
+
+.. _phot-utils:
+
+##########
+Phot Utils
+##########
+
+The ``rubin_sim.phot_utils`` module provides synthetic photometry
+and SNR calculation methods for Rubin. Expected throughput curves
+are available in the 'throughputs' directory of $RUBIN_SIM_DATA_DIR.
\ No newline at end of file
diff --git a/docs/satellite-constellations-api.rst b/docs/satellite-constellations-api.rst
new file mode 100644
index 000000000..8757e08e6
--- /dev/null
+++ b/docs/satellite-constellations-api.rst
@@ -0,0 +1,12 @@
+.. py:currentmodule:: rubin_sim.satellite_constellations
+
+.. _satellite-constellations-api:
+
+============================
+Satellite Constellations API
+============================
+
+..
automodule:: rubin_sim.satellite_constellations
+    :imported-members:
+    :members:
+    :show-inheritance:
\ No newline at end of file
diff --git a/docs/satellite-constellations.rst b/docs/satellite-constellations.rst
new file mode 100644
index 000000000..32aa35168
--- /dev/null
+++ b/docs/satellite-constellations.rst
@@ -0,0 +1,13 @@
+.. py:currentmodule:: rubin_sim
+
+.. _satellite-constellations:
+
+########################
+Satellite Constellations
+########################
+
+The ``rubin_sim.satellite_constellations`` module contains
+tools for creating and propagating satellite mega-constellations
+to evaluate how they could streak Rubin images.
+There is also an extension for ``rubin_scheduler`` that will add
+"satellite dodging" to the scheduler logic, at a cost of overall image depth.
diff --git a/docs/selfcal-api.rst b/docs/selfcal-api.rst
new file mode 100644
index 000000000..d978e088f
--- /dev/null
+++ b/docs/selfcal-api.rst
@@ -0,0 +1,12 @@
+.. py:currentmodule:: rubin_sim.selfcal
+
+.. _selfcal-api:
+
+====================
+Self Calibration API
+====================
+
+.. automodule:: rubin_sim.selfcal
+    :imported-members:
+    :members:
+    :show-inheritance:
\ No newline at end of file
diff --git a/docs/selfcal.rst b/docs/selfcal.rst
new file mode 100644
index 000000000..c7bdfb422
--- /dev/null
+++ b/docs/selfcal.rst
@@ -0,0 +1,15 @@
+.. py:currentmodule:: rubin_sim
+
+.. _selfcal:
+
+################
+Self Calibration
+################
+
+The ``rubin_sim.selfcal`` module contains tools for simulating a basic
+self-calibration effort. This was used to estimate photometric errors
+remaining after self-calibration in `LSE-180 `_.
+This module remains useful for first-pass investigations into the effects
+of survey strategy choices on the resulting photometric calibration,
+in particular for investigating the effects of the survey footprint
+or of rolling cadence.
\ No newline at end of file
diff --git a/docs/skybrightness-api.rst b/docs/skybrightness-api.rst
new file mode 100644
index 000000000..c4499fd04
--- /dev/null
+++ b/docs/skybrightness-api.rst
@@ -0,0 +1,12 @@
+.. py:currentmodule:: rubin_sim.skybrightness
+
+.. _skybrightness-api:
+
+=================
+Skybrightness API
+=================
+
+.. automodule:: rubin_sim.skybrightness
+    :imported-members:
+    :members:
+    :show-inheritance:
\ No newline at end of file
diff --git a/docs/skybrightness.rst b/docs/skybrightness.rst
new file mode 100644
index 000000000..5cdefb232
--- /dev/null
+++ b/docs/skybrightness.rst
@@ -0,0 +1,23 @@
+.. py:currentmodule:: rubin_sim.skybrightness
+
+.. _skybrightness:
+
+#############
+Skybrightness
+#############
+
+The ``rubin_sim.skybrightness`` module generates
+predicted skybrightness values (either in magnitudes per
+square arcsecond for any LSST bandpass, or
+as a SED over the relevant wavelengths).
+It uses the ESO skybrightness model components
+(including upper and lower atmosphere emission lines, airglow continuum,
+zodiacal light, and scattered lunar light) and adds
+twilight components.
+The model predictions have been tested against skybrightness
+measurements at the LSST site.
+
+More details about the rubin_sim version of the model and
+its validation for Rubin are available in
+`An optical to IR sky brightness model for the LSST by Yoachim et al.
+`_.
diff --git a/docs/user-guide.rst b/docs/user-guide.rst
new file mode 100644
index 000000000..d1d726cfd
--- /dev/null
+++ b/docs/user-guide.rst
@@ -0,0 +1,27 @@
+.. py:currentmodule:: rubin_sim
+
+..
_user-guide: + +########## +User Guide +########## + + +.. toctree:: + + Data Download Utilities + + Metrics Analysis Framework (MAF) + + Moving Objects + + Phot Utils + + Satellite Constellations + + Self Calibration + + Skybrightness + + + From e2d21ffdce30b4c927bf75b37e4d25c94103ae5d Mon Sep 17 00:00:00 2001 From: Lynne Jones Date: Thu, 21 Dec 2023 16:58:08 -0800 Subject: [PATCH 02/26] Migrate to user-guide --- docs/Makefile | 6 +++++- docs/conf.py | 4 ---- docs/installation.rst | 2 +- docs/maf.rst | 2 +- docs/metric_list.py | 15 +++++++++------ 5 files changed, 16 insertions(+), 13 deletions(-) diff --git a/docs/Makefile b/docs/Makefile index d4bb2cbb9..c09ffdab8 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -3,7 +3,7 @@ # You can set these variables from the command line, and also # from the environment for the first two. -SPHINXOPTS ?= +SPHINXOPTS ?= --keep-going -T -n SPHINXBUILD ?= sphinx-build SOURCEDIR = . BUILDDIR = _build @@ -18,3 +18,7 @@ help: # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). %: Makefile @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +html: + python metric_list.py + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) \ No newline at end of file diff --git a/docs/conf.py b/docs/conf.py index 3cd4584d2..4ca84a36b 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -6,7 +6,3 @@ from documenteer.conf.guide import * # noqa: F403, import * linkcheck_retries = 2 - -from .metric_list import make_metric_list - -make_metric_list("maf-metric-list.rst") diff --git a/docs/installation.rst b/docs/installation.rst index 3f8d15a95..798c7aafd 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -78,7 +78,7 @@ however building a local copy can be done as follows: pip install "documenteer[guide]" cd docs - package-docs build + make html The root of the local documentation will then be ``docs/_build/html/index.html``. diff --git a/docs/maf.rst b/docs/maf.rst index bdcd7fa58..ab4f784e3 100644 --- a/docs/maf.rst +++ b/docs/maf.rst @@ -32,7 +32,7 @@ in depth in a notebook in the github repo at `lsst/rubin_sim_notebooks `_ (see the maf/science directory). -MAF also provides lots of ready to use :doc:`metrics `, as well as +MAF also provides lots of ready to use :ref:`metrics `, as well as a variety of ways to subdivide the pointing histories using :py:obj:`rubin_sim.maf.slicers` -- a typical use case is to evaluate a quantity at all points over the sky, which would use the :py:class:`rubin_sim.maf.slicers.HealpixSlicer` slicer, but there are diff --git a/docs/metric_list.py b/docs/metric_list.py index 1161aabfe..a692b0720 100644 --- a/docs/metric_list.py +++ b/docs/metric_list.py @@ -12,14 +12,16 @@ def make_metric_list(outfile): # Print header print(".. py:currentmodule:: rubin_sim.maf", file=f) print("", file=f) - print(".. _rubin_sim.maf_metricist:", file=f) + print(".. 
_maf-metric-list:", file=f) print("", file=f) - print("================================", file=f) + print("################################", file=f) print("rubin_sim MAF: Available metrics", file=f) - print("================================", file=f) + print("################################", file=f) + + print(" ", file=f) print("Core LSST MAF metrics", file=f) - print("=====================", file=f) + print("^^^^^^^^^^^^^^^^^^^^^", file=f) print(" ", file=f) for name, obj in inspect.getmembers(metrics): if inspect.isclass(obj): @@ -31,7 +33,7 @@ def make_metric_list(outfile): print(" ", file=f) print("Contributed maf_contrib metrics", file=f) - print("==============================", file=f) + print("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", file=f) print(" ", file=f) for name, obj in inspect.getmembers(maf_contrib): if inspect.isclass(obj): @@ -42,5 +44,6 @@ def make_metric_list(outfile): print(f"- {link} \n \t {simpledoc}", file=f) print(" ", file=f) + if __name__ == "__main__": - make_metric_list("maf-metric-list.rst") \ No newline at end of file + make_metric_list("maf-metric-list.rst") From 425b7a2fef9e82a007cf91a3f2026040329e173b Mon Sep 17 00:00:00 2001 From: Lynne Jones Date: Thu, 21 Dec 2023 17:00:16 -0800 Subject: [PATCH 03/26] Clean ruff/docstrings in data, satellites, selfcal --- rubin_sim/data/__init__.py | 2 +- rubin_sim/data/rs_download_data.py | 43 ++++++++-- .../satellite_constellations/__init__.py | 6 +- .../basis_function.py | 6 +- .../model_observatory.py | 40 +++++---- .../satellite_constellations/sat_utils.py | 81 ++++++++++--------- rubin_sim/selfcal/__init__.py | 8 +- rubin_sim/selfcal/generate_catalog.py | 23 +++--- rubin_sim/selfcal/offsets.py | 27 ++++--- rubin_sim/selfcal/solver.py | 29 ++++--- rubin_sim/selfcal/star_tools.py | 6 +- 11 files changed, 163 insertions(+), 108 deletions(-) diff --git a/rubin_sim/data/__init__.py b/rubin_sim/data/__init__.py index 61905549b..7361d80cd 100644 --- a/rubin_sim/data/__init__.py +++ b/rubin_sim/data/__init__.py @@ -1 +1 @@ -from .rs_download_data import * +from .rs_download_data import * #noqa: F403 diff --git a/rubin_sim/data/rs_download_data.py b/rubin_sim/data/rs_download_data.py index 454740800..fd186b032 100644 --- a/rubin_sim/data/rs_download_data.py +++ b/rubin_sim/data/rs_download_data.py @@ -1,30 +1,55 @@ -__all__ = ("data_dict", "rs_download_data", "get_data_dir") +__all__ = ("data_dict", "rs_download_data", "get_data_dir", "get_baseline") import argparse from rubin_scheduler.data import DEFAULT_DATA_URL, download_rubin_data from rubin_scheduler.data import get_data_dir as gdd +from rubin_scheduler.data import get_baseline as gbd def get_data_dir(): - """For backwards compatibility since this got moved over to the scheduler.""" + """Wraps rubin_scheduler.data.get_data_dir(). + Provided here for backwards compatibility. + + Returns + ------- + $RUBIN_SIM_DATA_DIR : `str` + Directory containing the necessary data for rubin_sim_data. + """ return gdd() +def get_baseline(): + """Wraps rubin_scheduler.data.get_baseline(). + Provided here for backwards compatibility. + + Returns + ------- + baseline_simulation_filepath : `str` + Filepath to the baseline simulation provided with rubin_sim_data. + """ + # Note: this should probably return to rubin_sim, as sim_baseline is + # not part of the data for rubin_scheduler. + return gbd() + + def data_dict(): - """Creates a `dict` for all data buckets and the tar file they map to. 
- To create tar files and follow any sym links, run:
- ``tar -chvzf maf_may_2021.tgz maf``
+    """
+    Dictionary containing the expected version information for the
+    rubin_sim_data data sets used by this version of rubin_sim.

    Returns
    -------
-    result : `dict`
+    file_dict : `dict`
        Data bucket filenames dictionary with keys:
        ``"name"``
            Data bucket name (`str`).
        ``"version"``
            Versioned file name (`str`).
    """
+    # Note for developers:
+    # to create tar files and follow any sym links, run: e.g.
+    # ``tar -chvzf maf_may_2021.tgz maf``
    file_dict = {
        "maf": "maf_2022_08_26.tgz",
        "maps": "maps_2022_2_28.tgz",
@@ -40,7 +65,11 @@ def rs_download_data():
-    """Download data."""
+    """Utility to download necessary data for rubin_sim.
+
+    Wrapper around rubin_scheduler.scheduler_download_data,
+    but downloading the data files specified by rubin_sim.
+    """
    files = data_dict()
    parser = argparse.ArgumentParser(description="Download data files for rubin_sim package")
diff --git a/rubin_sim/satellite_constellations/__init__.py b/rubin_sim/satellite_constellations/__init__.py
index 502d4d461..fdec3660d 100644
--- a/rubin_sim/satellite_constellations/__init__.py
+++ b/rubin_sim/satellite_constellations/__init__.py
@@ -1,3 +1,3 @@
-from .basis_function import *
-from .model_observatory import *
-from .sat_utils import *
+from .basis_function import *  # noqa: F403
+from .model_observatory import *  # noqa: F403
+from .sat_utils import *  # noqa: F403
diff --git a/rubin_sim/satellite_constellations/basis_function.py b/rubin_sim/satellite_constellations/basis_function.py
index 700e4f702..e0e7ab012 100644
--- a/rubin_sim/satellite_constellations/basis_function.py
+++ b/rubin_sim/satellite_constellations/basis_function.py
@@ -6,7 +6,8 @@
class SatelliteAvoidBasisFunction(bf.BaseBasisFunction):
-    """Class to take satellite position information from the conditions object and avoid streaks
+    """Uses satellite position information from the Conditions object
+    to compute a map that avoids streaks.

    Parameters
    ----------
@@ -32,7 +33,8 @@ def _calc_value(self, conditions, indx=None):
        result = hp.smoothing(result, fwhm=self.smooth_fwhm)
        result = hp.ud_grade(result, self.nside)
        result[np.where(result < 0)] = 0
-        # Make it negative, so positive weights will result in avoiding satellites
+        # Make it negative, so positive weights will result
+        # in avoiding satellites
        result *= -1
        return result
diff --git a/rubin_sim/satellite_constellations/model_observatory.py b/rubin_sim/satellite_constellations/model_observatory.py
index 20b24801c..6e8b4ebca 100644
--- a/rubin_sim/satellite_constellations/model_observatory.py
+++ b/rubin_sim/satellite_constellations/model_observatory.py
@@ -5,7 +5,8 @@
from rubin_scheduler.site_models import Almanac
from rubin_scheduler.utils import _healbin, survey_start_mjd
-# Take the model observatory from the scheduler and subclass and expand to include satellite constellations
+# Take the model observatory from the scheduler and
+# subclass to expand to include satellite constellations
class ModelObservatory(OMO):
@@ -16,17 +17,20 @@ class ModelObservatory(OMO):
    nside : `int`
        The healpix nside resolution
    mjd_start : `float`
-        The MJD to start the observatory up at. Uses util to lookup default if None.
+        The MJD at which to start up the observatory.
+        If None, uses a utility to look up the default.
    alt_min : `float`
        The minimum altitude to compute models at (degrees).
    lax_dome : `bool`
        Passed to observatory model. If true, allows dome creep.
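As a minimal usage sketch of the data-access wrappers in this patch (this assumes rubin_sim is installed and $RUBIN_SIM_DATA_DIR has been populated via rs_download_data):

    from rubin_sim.data import data_dict, get_baseline, get_data_dir

    # Where the downloaded rubin_sim_data lives.
    print(get_data_dir())
    # Filepath of the baseline opsim simulation bundled with the data.
    print(get_baseline())
    # The versioned tar file expected for each data bucket.
    for bucket, versioned_file in data_dict().items():
        print(bucket, "->", versioned_file)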
cloud_limit : `float`
-        The limit to stop taking observations if the cloud model returns something equal or higher
+        The limit to stop taking observations if the cloud model
+        returns a value equal to or higher than this limit.
    sim_to_o : `sim_targetoO`
        If one would like to inject simulated ToOs into the telemetry stream.
    seeing_db : `str`
-        If one would like to use an alternate seeing database, filename of sqlite file
+        If one would like to use an alternate seeing database,
+        filename of the sqlite file.
    park_after : `float`
        Park the telescope after a gap longer than park_after (minutes)
    init_load_length : `int`
@@ -64,7 +68,8 @@ def __init__(
        self.sat_nside = sat_nside
        self.constellation = constellation
-        # Need to do a little fiddle with the MJD since self.mjd needs self.night set now.
+        # Need to do a little fiddle with the MJD since
+        # self.mjd needs self.night set now.
        self.mjd_start = survey_start_mjd() if mjd_start is None else mjd_start
        self.almanac = Almanac(mjd_start=self.mjd_start)
        self.night = -1
@@ -73,21 +78,22 @@ def __init__(
        super().__init__(
            nside=None,
            mjd_start=self.mjd_start,
-            seed=42,
-            alt_min=5.0,
-            lax_dome=True,
-            cloud_limit=0.3,
-            sim_to_o=None,
-            seeing_db=None,
-            park_after=10.0,
-            init_load_length=10,
+            seed=seed,
+            alt_min=alt_min,
+            lax_dome=lax_dome,
+            cloud_limit=cloud_limit,
+            sim_to_o=sim_to_o,
+            seeing_db=seeing_db,
+            park_after=park_after,
+            init_load_length=init_load_length,
        )
    def return_conditions(self):
        """
        Returns
        -------
-        rubin_sim.scheduler.features.conditions object
+        conditions: `rubin_sim.scheduler.features.conditions`
+            Current conditions as simulated by the ModelObservatory.
        """
        # Spot to put in satellite streak prediction maps
@@ -114,10 +120,10 @@ def mjd(self, value):
        self._update_satellite_maps()
    def _update_satellite_maps(self):
-        """Make the satellite prediction maps for the night
+        """Make the satellite prediction maps for the night.

-        will set self.sat_mjds and self.satellite_maps that can then be attached to
-        a conditions object in self.return_conditions
+        Will set self.sat_mjds and self.satellite_maps that can then
+        be attached to a conditions object in self.return_conditions.
        """
        sunset = self.almanac.sunsets["sun_n12_setting"][self.almanac_indx]
        sunrise = self.almanac.sunsets["sun_n12_rising"][self.almanac_indx]
diff --git a/rubin_sim/satellite_constellations/sat_utils.py b/rubin_sim/satellite_constellations/sat_utils.py
index 65ba0e71b..7e288d66f 100644
--- a/rubin_sim/satellite_constellations/sat_utils.py
+++ b/rubin_sim/satellite_constellations/sat_utils.py
@@ -19,13 +19,17 @@
def sun_alt_limits():
-    """For different constellations, expect zero illuminated satellites above 20 degree altitude
-    if the sun is below the limits (degrees)
+    """Return the sun altitude limits (degrees) below which zero
+    illuminated satellites above 20 degrees altitude are expected.
+
+    Different constellations have different sun altitude limits at which
+    zero illumination above 20 degrees altitude occurs.

    Returns
    -------
-    result : `dict` [`str`, `float`]
-        Dict with satellite constellation name keys, altitude limits values (degrees).
+    sun_alt_limits : `dict` {`str`: `float`}
+        Dict with satellite constellation name keys,
+        altitude limits values (degrees).
    """
    # Estimated in sun_alts_limits.ipynb
    result = {"slv1": -36.0, "slv2": -36.0, "oneweb": -53.0}
    return result
def satellite_mean_motion(altitude, mu=const.GM_earth, r_earth=const.R_earth):
-    """
-    Compute mean motion of satellite at altitude in Earth's gravitational field.
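For orientation, a hypothetical instantiation of this satellite-aware ModelObservatory; the argument names here come from the docstring above, but the exact constructor signature should be checked against the source:

    from rubin_sim.satellite_constellations import (
        Constellation,
        ModelObservatory,
        starlink_tles_v1,
    )

    # Build a Starlink v1-like constellation and hand it to the observatory.
    constellation = Constellation(starlink_tles_v1())
    observatory = ModelObservatory(constellation=constellation)
    # The returned conditions include the satellite streak prediction maps.
    conditions = observatory.return_conditions()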
+ """Calculate mean motion of satellites at a given altitude in Earth's + gravitational field. See https://en.wikipedia.org/wiki/Mean_motion#Formulae Parameters ---------- altitude : `float` - Altitude of the satellite. Should be a float with astropy units attached + Altitude of the satellite. + Should be a float with astropy units attached. Returns ------- - mm : `float` + mean_motion : `float` """ no = np.sqrt(4.0 * np.pi**2 * (altitude + r_earth) ** 3 / mu).to(u.day) return 1 / no def tle_from_orbital_parameters(sat_name, sat_nr, epoch, inclination, raan, mean_anomaly, mean_motion): - """ - Generate TLE strings from orbital parameters. + """Generate TLE strings from orbital parameters. Parameters ---------- @@ -77,13 +81,12 @@ def tle_from_orbital_parameters(sat_name, sat_nr, epoch, inclination, raan, mean Notes ----- - epoch has a very strange format: first two digits are the year, next three + epoch has the format: first two digits are the year, next three digits are the day from beginning of year, then fraction of a day is given, e.g. 20180.25 would be 2020, day 180, 6 hours (UT?) """ # Note: RAAN = right ascention (or longitude) of ascending node - # I suspect this is filling in 0 eccentricity everywhere. def checksum(line): @@ -174,9 +177,8 @@ def create_constellation( def starlink_tles_v1(): - """ - Create a list of satellite TLE's. - For starlink v1 (as of July 2022). Should create 4,408 orbits + """Create a list of satellite TLE's, appropriate for + Starlink v1 (as of July 2022). Should create 4,408 orbits Returns ------- @@ -193,9 +195,8 @@ def starlink_tles_v1(): def starlink_tles_v2(): - """ - Create a list of satellite TLE's - For starlink v2 (as of July 2022). Should create 29,988 orbits + """Create a list of satellite TLE's appropriate for + Starlink v2 (as of July 2022). Should create 29,988 orbits Returns ------- @@ -212,8 +213,7 @@ def starlink_tles_v2(): def oneweb_tles(): - """ - Create a list of satellite TLE's + """Create a list of satellite TLE's appropriate for OneWeb plans (as of July 2022). Should create 6,372 orbits Returns @@ -231,8 +231,9 @@ def oneweb_tles(): class Constellation: - """ - Have a class to hold satellite constellation + """Holds the constellation TLEs and calculates their appearance + in a series of observations. + Parameters ---------- sat_tle_list : `list` of `str` @@ -264,8 +265,8 @@ def _make_location(self): self.observatory_site = wgs84.latlon(telescope.latitude, telescope.longitude, telescope.height) def update_mjd(self, mjd): - """ - Record the alt,az position and illumination status for all the satellites at a given time + """Calculate and record the alt/az position and illumination status + for all the satellites at a given time. """ jd = mjd + MJDOFFSET t = self.ts.ut1_jd(jd) @@ -279,7 +280,8 @@ def update_mjd(self, mjd): self.illum.append(illum.copy()) if illum: topo = current_sat - self.observatory_site.at(t) - alt, az, dist = topo.altaz() # this returns an anoying Angle object + # this returns an Angle object + alt, az, dist = topo.altaz() self.altitudes_rad.append(alt.radians + 0) self.azimuth_rad.append(az.radians + 0) else: @@ -293,8 +295,8 @@ def update_mjd(self, mjd): self.visible = np.where((self.altitudes_rad >= self.alt_limit_rad) & (self.illum == True))[0] def paths_array(self, mjds): - """For an array of MJD values, compute the resulting RA,Dec and illumination status of - the full constellation at each MJD. + """Calculate and return the RA/Dec/Alt and illumination status + for all the satellites at an array of times. 
Parameters
    ----------
@@ -342,25 +344,26 @@ def check_pointings(
        test_radius=10.0,
        dt=2.0,
    ):
-        """Find streak length and number of streaks in an image
+        """Calculate streak length and number of streaks in a set of visits.

        Parameters
        ----------
-        pointing_ras : array
+        pointing_ras : `np.ndarray`
            The RA for each pointing (degrees).
-        pointing_decs : array
-            The dec for each pointing (degres).
+        pointing_decs : `np.ndarray`
+            The dec for each pointing (degrees).
        mjds : `np.ndarray`
            The MJD for the (start) of each pointing (days).
        visit_time : `np.ndarray`
-            The entire time a visit happend (seconds).
+            The start to end time for a visit (seconds).
        fov_radius : `float`
            The radius of the science field of view (degrees)
        test_radius : `float`
-            The radius to use to see if a streak gets close (degrees). Need to set large
-            because satellites can be moving at ~1 deg/s.
+            The radius to use to see if a streak gets close (degrees).
+            Should be large, because satellites can be moving at ~1 deg/s.
        dt : `float`
-            The timestep to use for high resolution checking if a satellite crossed (seconds).
+            The timestep to use for high resolution checking
+            if a satellite crossed (seconds).

        Returns
        -------
@@ -384,7 +387,8 @@
        pointing_decs_rad = np.radians(pointing_decs)
        fov_radius_rad = np.radians(fov_radius)
-        # Note self.paths_array should return an array that is N_sats x N_mjds in shape
+        # Note self.paths_array should return an array that is
+        # N_sats x N_mjds in shape
        # And all angles in radians.
        sat_ra_1, sat_dec_1, sat_alt_1, sat_illum_1 = self.paths_array(mjds)
        mjd_end = mjds + visit_time
@@ -402,7 +406,8 @@
            & ((sat_illum_1 == True) | (sat_illum_2 == True))
        )
-        # point_to_line_distance can take arrays, but they all need to be the same shape,
+        # point_to_line_distance can take arrays,
+        # but they all need to be the same shape,
        # thus why we broadcast pointing ra and dec above.
        distances = point_to_line_distance(
            sat_ra_1[above_illum_indx],
@@ -448,7 +453,7 @@
def _streak_length(sat_ras, sat_decs, pointing_ra, pointing_dec, radius):
-    """Calc streak lengths
+    """Calculate streak lengths for satellites in a given (circular) pointing.

    Parameters
    ----------
diff --git a/rubin_sim/selfcal/__init__.py b/rubin_sim/selfcal/__init__.py
index 72858efe6..91e0e95c5 100644
--- a/rubin_sim/selfcal/__init__.py
+++ b/rubin_sim/selfcal/__init__.py
@@ -1,4 +1,4 @@
-from .generate_catalog import *
-from .offsets import *
-from .solver import *
-from .star_tools import *
+from .generate_catalog import *  # noqa: F403
+from .offsets import *  # noqa: F403
+from .solver import *  # noqa: F403
+from .star_tools import *  # noqa: F403
diff --git a/rubin_sim/selfcal/generate_catalog.py b/rubin_sim/selfcal/generate_catalog.py
index 5d5d23a2d..625499061 100644
--- a/rubin_sim/selfcal/generate_catalog.py
+++ b/rubin_sim/selfcal/generate_catalog.py
@@ -5,7 +5,7 @@
import numpy as np
import numpy.lib.recfunctions as rfn
-from scipy.spatial import cKDTree as kdtree
+from scipy.spatial import cKDTree as kdtree  # noqa: N813
from .offsets import OffsetSNR
from .star_tools import assign_patches, stars_project
@@ -37,7 +37,8 @@ def build_tree(ra, dec, leafsize=100):
    """Build KD tree on RA/dec and set radius (via setRad) for matching.
    ra, dec = RA and Dec values (in radians).
-    leafsize = the number of Ra/Dec pointings in each leaf node."""
+    leafsize = the number of Ra/Dec pointings in each leaf node.
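A sketch of calling check_pointings for a few visits; the array shapes and units follow the docstring above, and the assumption that the two return values are the per-visit streak lengths and streak counts is based on the surrounding code:

    import numpy as np
    from rubin_sim.satellite_constellations import Constellation, starlink_tles_v1

    constellation = Constellation(starlink_tles_v1())
    ras = np.array([10.0, 12.0, 14.0])           # degrees
    decs = np.array([-20.0, -21.0, -22.0])       # degrees
    mjds = np.array([60218.10, 60218.12, 60218.14])
    visit_times = np.zeros(3) + 30.0             # seconds
    lengths, n_streaks = constellation.check_pointings(ras, decs, mjds, visit_times)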
+ """ if np.any(np.abs(ra) > np.pi * 2.0) or np.any(np.abs(dec) > np.pi * 2.0): raise ValueError("Expecting RA and Dec values to be in radians.") x, y, z = treexyz(ra, dec) @@ -65,22 +66,22 @@ def generate_catalog( Parameters ---------- - visits : np.array + visits : `np.array` A numpy array with the properties of the visits. Expected columns of fiveSigmaDepth, ra, dec, rotSkyPos (all degrees) - offsets : list of rubin_sim.selfcal.Offset classes + offsets : `list` of rubin_sim.selfcal.Offset classes A list of instatiated classes that will apply offsets to the stars - lsst_filter : str ("r") + lsst_filter : `str` ("r") Which filter to use for the observed stars. - n_patches : int (16) + n_patches : `int` (16) Number of patches to divide the FoV into. Must be an integer squared - radius_fov : float (1.8) + radius_fov : `float` (1.8) Radius of the telescope field of view in degrees - seed : float (42) + seed : `float` (42) Random number seed - uncert_floor : float (0.005) + uncert_floor : `float` (0.005) Value to add in quadrature to magnitude uncertainties (mags) - verbose : bool (True) + verbose : `bool` (True) Should we be verbose """ @@ -136,7 +137,7 @@ def generate_catalog( for i, visit in enumerate(visits): dmags = {} # Calc x,y, radius for each star, crop off stars outside the FoV - # XXX - plan to replace with code to see where each star falls and get chipID. + # could replace with code to see where each star falls and get chipID. vx, vy, vz = treexyz(np.radians(visit["ra"]), np.radians(visit["dec"])) indices = star_tree.query_ball_point((vx, vy, vz), tree_radius) stars_in = stars[indices] diff --git a/rubin_sim/selfcal/offsets.py b/rubin_sim/selfcal/offsets.py index ffe271a98..a2f7dcc51 100644 --- a/rubin_sim/selfcal/offsets.py +++ b/rubin_sim/selfcal/offsets.py @@ -41,24 +41,27 @@ def __call__(self, stars, visits, **kwargs): class OffsetClouds(BaseOffset): - """Offset based on cloud structure. XXX--not fully implamented.""" + """Offset based on cloud structure. + Not used, as not fully implemented in this version (ArmaSf). + """ def __init__(self, sampling=256, fov=3.5): self.fov = fov self.newkey = "dmag_cloud" - self.SF = ArmaSf() - self.cloud = Clouds() + # self.SF = ArmaSf() + self.SF = None + # self.cloud = Clouds() + self.cloud = None def __call__(self, stars, visits, **kwargs): # XXX-Double check extinction is close to the Opsim transparency extinc_mags = visits["transparency"] if extinc_mags != 0.0: - # need to decide on how to get extinc_mags from Opsim - # Maybe push some of these params up to be setable? sf_theta, sf_sf = self.SF.CloudSf(500.0, 300.0, 5.0, extinc_mags, 0.55) # Call the Clouds self.cloud.makeCloudImage(sf_theta, sf_sf, extinc_mags, fov=self.fov) - # Interpolate clouds to correct position. Nearest neighbor for speed? + # Interpolate clouds to correct position. + # Nearest neighbor for speed? nim = self.cloud.cloudimage[0, :].size # calc position in cloud image of each star starx_interp = (np.degrees(stars["x"]) + self.fov / 2.0) * 3600.0 / self.cloud.pixscale @@ -81,9 +84,12 @@ def __call__(self, stars, visits, **kwargs): class OffsetSNR(BaseOffset): - """Generate offsets based on the 5-sigma limiting depth of an observation and the brightness of the star. + """Generate offsets based on the 5-sigma limiting depth of an observation + and the brightness of the star. + Note that this takes into account previous offsets that have been applied - (so run this after things like vingetting).""" + (so run this after things like vignetting). 
+ """ def __init__(self, lsst_filter="r"): self.lsst_filter = lsst_filter @@ -104,8 +110,9 @@ def __call__(self, stars, visit, dmags=None): if dmags is None: dmags = {} temp_mag = stars[self.lsst_filter + "mag"].copy() - # calc what magnitude the star has when it hits the silicon. Thus we compute the SNR noise - # AFTER things like cloud extinction and vingetting. + # calc what magnitude the star has when it hits the silicon. + # Thus we compute the SNR noise + # AFTER things like cloud extinction and vignetting. for key in list(dmags.keys()): temp_mag = temp_mag + dmags[key] dmag = self.calc_mag_errors(temp_mag, visit["fiveSigmaDepth"]) diff --git a/rubin_sim/selfcal/solver.py b/rubin_sim/selfcal/solver.py index 387e7739d..7cb21a42f 100644 --- a/rubin_sim/selfcal/solver.py +++ b/rubin_sim/selfcal/solver.py @@ -14,19 +14,20 @@ class LsqrSolver: Parameters ---------- - observaitons : np.array - A numpy array of the observations. Should have columns id, patch_id, observed_mag, mag_uncert - patch_out : str ("solved_patches.npz") + observations : `np.array` + A numpy array of the observations. + Should have columns id, patch_id, observed_mag, mag_uncert + patch_out : `str` ("solved_patches.npz") Output file for patch solutions, can be set to None - star_out : str ("solved_stars.npz") + star_out : `str` ("solved_stars.npz") Output file for star solutions, can be set to None - atol : float (1e-8) + atol : `float` (1e-8) Tolerance passed to lsqr - btol : float (1e-8) + btol : `float` (1e-8) Tolerance passed to lsqr - iter_lim : int (None) + iter_lim : `int` (None) Iteration limit passed to lsqr - show : bool (False) + show : `bool` (False) Should the lsqr solver print some iteration logs (False). """ @@ -90,7 +91,8 @@ def clean_data(self): for i in range(np.size(left)): self.observations["patch_id"][left[i] : right[i]] = patches_index[i] - # Convert id to continuous running index to keep matrix as small as possible + # Convert id to continuous running index to keep matrix + # as small as possible self.observations.sort(order="id") self.stars = np.unique(self.observations["id"]) @@ -115,9 +117,9 @@ def solve_matrix(self): # data = np.append(np.ones(nObs),1./observations['mag_uncert']) data = 1.0 / self.observations["mag_uncert"] data = np.append(data, data) - b = ( - self.observations["observed_mag"] / self.observations["mag_uncert"] - ) # maybe do this in place earlier? then I can just delete parts of observations earlier to save total memory + # maybe do this in place earlier? + # then just delete parts of observations earlier to save total memory + b = self.observations["observed_mag"] / self.observations["mag_uncert"] # blast away data now that we have the matrix constructed del self.observations @@ -131,7 +133,8 @@ def return_solution(self): """ Returns ------- - np.array with patch zeropoints and star best-fit mags. + patches, stars: `np.array`, `np.array` + Two arrays containing patch zeropoints and star best-fit mags. """ patches = np.empty(self.patches.size, dtype=list(zip(["patch_id", "zp"], [int, float]))) patches["patch_id"] = self.patches diff --git a/rubin_sim/selfcal/star_tools.py b/rubin_sim/selfcal/star_tools.py index 469b50a70..674caac1b 100644 --- a/rubin_sim/selfcal/star_tools.py +++ b/rubin_sim/selfcal/star_tools.py @@ -15,7 +15,8 @@ def stars_project(stars, visit): np.radians(visit["ra"]), np.radians(visit["dec"]), ) - # Rotate the field using the visit rotSkyPos. Hope I got that sign right... + # Rotate the field using the visit rotSkyPos. + # Hope I got that sign right... 
sin_rot = np.sin(np.radians(visit["rotSkyPos"]))
    cos_rot = np.cos(np.radians(visit["rotSkyPos"]))
    stars["x"] = cos_rot * xtemp + sin_rot * ytemp
@@ -27,7 +28,8 @@ def assign_patches(stars, visit, n_patches=16, radius_fov=1.8):
    """
-    Assign PatchIDs to everything. Assume that stars have already been projected to x,y
+    Assign PatchIDs to everything.
+    Assume that stars have already been projected to x,y
    """
    maxx, maxy = gnomonic_project_toxy(0.0, np.radians(radius_fov), 0.0, 0.0)
    nsides = n_patches**0.5
From 3d848fc2785a41ed5716c67d4c8ed8b475bb3954 Mon Sep 17 00:00:00 2001
From: Lynne Jones
Date: Thu, 21 Dec 2023 17:02:19 -0800
Subject: [PATCH 04/26] Update doc workflow
---
 .github/workflows/build_docs.yaml | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)
diff --git a/.github/workflows/build_docs.yaml b/.github/workflows/build_docs.yaml
index e4fe357d9..71648c56d 100644
--- a/.github/workflows/build_docs.yaml
+++ b/.github/workflows/build_docs.yaml
@@ -30,7 +30,7 @@ jobs:
        run: |
          mamba install --quiet --file=requirements.txt
          mamba install --quiet pip
-          pip install "documenteer[pipelines]"
+          pip install "documenteer[guide]"
      - name: install rubin_sim
        shell: bash -l {0}
@@ -66,16 +66,14 @@ jobs:
        shell: bash -l {0}
        run: |
          export RUBIN_SIM_DATA_DIR=~/rubin_sim_data
-          cd doc
-          python metric_list.py
+          cd docs
          make html
-          #package-docs build
      - name: upload documentation
        uses: lsst-sqre/ltd-upload@v1
        with:
          project: "rubin-sim"
-          dir: "doc/_build/html"
+          dir: "docs/_build/html"
          username: ${{ secrets.ltd_username }}
          password: ${{ secrets.ltd_password }}
From 2163c109aabe563b4240cd685921b29aba471ba0 Mon Sep 17 00:00:00 2001
From: Lynne Jones
Date: Sat, 13 Jan 2024 18:17:29 -0800
Subject: [PATCH 05/26] Ruff for moving_objects

Ruff for moving_objects
ruff for satellite constellations
ruff
Ruff and docstrings phot_utils
---
 docs/Makefile | 14 +-
 rubin_sim/moving_objects/__init__.py | 16 +-
 rubin_sim/moving_objects/base_obs.py | 154 +++---
 rubin_sim/moving_objects/cheby_fits.py | 284 ++++++-----
 rubin_sim/moving_objects/cheby_values.py | 39 +-
 rubin_sim/moving_objects/chebyshev_utils.py | 72 +--
 rubin_sim/moving_objects/direct_obs.py | 95 ++--
 rubin_sim/moving_objects/make_lsst_obs.py | 3 +-
 rubin_sim/moving_objects/ooephemerides.py | 177 ++++---
 rubin_sim/moving_objects/orbits.py | 131 +++--
 rubin_sim/moving_objects/pre_generate.py | 2 +
 rubin_sim/moving_objects/utils.py | 14 +-
 rubin_sim/phot_utils/bandpass.py | 2 +-
 .../phot_utils/photometric_parameters.py | 136 +++---
 rubin_sim/phot_utils/physical_parameters.py | 8 +-
 rubin_sim/phot_utils/sed.py | 455 ++++++++++--------
 rubin_sim/phot_utils/signaltonoise.py | 101 ++--
 rubin_sim/phot_utils/utils/__init__.py | 1 -
 rubin_sim/phot_utils/utils/test_utils.py | 255 ----------
 .../model_observatory.py | 4 +-
 .../satellite_constellations/sat_utils.py | 50 +-
 rubin_sim/scheduler/__init__.py | 2 +-
 rubin_sim/utils/__init__.py | 2 +-
 tests/phot_utils/test_snr.py | 6 +-
 24 files changed, 1015 insertions(+), 1008 deletions(-)
 delete mode 100644 rubin_sim/phot_utils/utils/__init__.py
 delete mode 100644 rubin_sim/phot_utils/utils/test_utils.py
diff --git a/docs/Makefile b/docs/Makefile
index c09ffdab8..4a6704eca 100644
--- a/docs/Makefile
+++ b/docs/Makefile
@@ -14,11 +14,19 @@ help:
 .PHONY: help Makefile
+clean:
+	rm -rf $(BUILDDIR)
+
+linkcheck:
+	$(SPHINXBUILD) -b linkcheck $(SPHINXOPTS) "$(SOURCEDIR)" "$(BUILDDIR)/linkcheck"
+	@echo
+	@echo "Link check complete; look for any errors in the above output " \
+	"or in
$(BUILDDIR)/linkcheck/output.txt." + # Catch-all target: route all unknown targets to Sphinx using the new # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). %: Makefile + python metric_list.py @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) -html: - python metric_list.py - @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) \ No newline at end of file + diff --git a/rubin_sim/moving_objects/__init__.py b/rubin_sim/moving_objects/__init__.py index e56a86f17..ef785b2e9 100644 --- a/rubin_sim/moving_objects/__init__.py +++ b/rubin_sim/moving_objects/__init__.py @@ -1,8 +1,8 @@ -from .base_obs import * -from .cheby_fits import * -from .cheby_values import * -from .chebyshev_utils import * -from .direct_obs import * -from .ooephemerides import * -from .orbits import * -from .utils import * +from .base_obs import * # noqa: F403 +from .cheby_fits import * # noqa: F403 +from .cheby_values import * # noqa: F403 +from .chebyshev_utils import * # noqa: F403 +from .direct_obs import * # noqa: F403 +from .ooephemerides import * # noqa: F403 +from .orbits import * # noqa: F403 +from .utils import * # noqa: F403 diff --git a/rubin_sim/moving_objects/base_obs.py b/rubin_sim/moving_objects/base_obs.py index 0a6a7164c..b04ad98e2 100644 --- a/rubin_sim/moving_objects/base_obs.py +++ b/rubin_sim/moving_objects/base_obs.py @@ -19,39 +19,50 @@ class BaseObs: Parameters ---------- footPrint: `str`, optional - Specify the footprint for the FOV. Options include "camera", "circle", "rectangle". - 'Camera' means use the actual LSST camera footprint (following a rough cut with a circular FOV). + Specify the footprint for the FOV. + Options include "camera", "circle", "rectangle". + 'Camera' means use the actual LSST camera footprint + (following a rough cut with a circular FOV). Default is camera FOV. r_fov : `float`, optional If footprint is "circular", this is the radius of the fov (in degrees). Default 1.75 degrees (only used for circular fov). x_tol : `float`, optional - If footprint is "rectangular", this is half of the width of the (on-sky) fov in the RA - direction (in degrees). - Default 5 degrees. (so size of footprint in degrees will be 10 degrees in the RA direction). + If footprint is "rectangular", this is half of the width + of the (on-sky) fov in the RA direction (in degrees). + Default 5 degrees. y_tol : `float`, optional - If footprint is "rectangular", this is half of the width of the fov in Declination (in degrees). - Default is 3 degrees (so size of footprint in degrees will be 6 degrees in the Dec direction). + If footprint is "rectangular", this is half of the width of + the fov in Declination (in degrees). + Default is 3 degrees eph_mode: `str`, optional Mode for ephemeris generation - nbody or 2body. Default is nbody. eph_type: `str`, optional Type of ephemerides to generate - full or basic. - Full includes all values calculated by openorb; Basic includes a more basic set. - Default is Basic. (this includes enough information for most standard MAF metrics). + Full includes all values calculated by openorb; + Basic includes a more basic set. + Default is Basic. eph_file: `str` or None, optional - The name of the planetary ephemerides file to use for ephemeris generation. + The name of the planetary ephemerides file to use + for ephemeris generation. Default (None) will use the default for PyOrbEphemerides. obs_code: `str`, optional - Observatory code for ephemeris generation. Default is "I11" - Cerro Pachon. 
+ Observatory code for ephemeris generation. + Default is "I11" - Cerro Pachon. obs_time_col: `str`, optional - Name of the time column in the obsData. Default 'observationStartMJD'. + Name of the time column in the obsData. + Default 'observationStartMJD'. obs_time_scale: `str`, optional - Type of timescale for MJD (TAI or UTC currently). Default TAI. + Type of timescale for MJD (TAI or UTC currently). + Default TAI. seeing_col: `str`, optional - Name of the seeing column in the obsData. Default 'seeingFwhmGeom'. - This should be the geometric/physical seeing as it is used for the trailing loss calculation. + Name of the seeing column in the obsData. + Default 'seeingFwhmGeom'. + This should be the geometric/physical seeing + as it is used for the trailing loss calculation. visit_exp_time_col: `str`, optional - Name of the visit exposure time column in the obsData. Default 'visitExposureTime'. + Name of the visit exposure time column in the obsData. + Default 'visitExposureTime'. obs_ra: `str`, optional Name of the RA column in the obsData. Default 'fieldRA'. obs_dec: `str`, optional @@ -59,13 +70,15 @@ class BaseObs: obs_rot_sky_pos: `str`, optional Name of the Rotator column in the obsData. Default 'rotSkyPos'. obs_degrees: `bool`, optional - Whether the observational data is in degrees or radians. Default True (degrees). + Whether the observational data is in degrees or radians. + Default True (degrees). outfile_name : `str`, optional The output file name. Default is 'lsst_obs.dat'. obs_info : `str`, optional A string that captures provenance information about the observations. - For example: 'baseline_v2.0_10yrs, years 0-5' or 'baseline2018a minus NES' + For example: 'baseline_v2.0_10yrs, years 0-5' + or 'baseline2018a minus NES' Default ''. """ @@ -145,19 +158,23 @@ def _setup_camera(self, camera_footprint_file=None): self.camera = LsstCameraFootprint(units="degrees", footprint_file=camera_footprint_file) def setup_ephemerides(self): - """Initialize the ephemeris generator. Save the setup PyOrbEphemeris class. + """Initialize the ephemeris generator. + Save the setup PyOrbEphemeris class. - This uses the default engine, pyoorb - however this could be overwritten to use another generator. + This uses the default engine, pyoorb - + however this could be overwritten to use another generator. """ self.ephems = PyOrbEphemerides(ephfile=self.eph_file) def generate_ephemerides(self, sso, times, eph_mode=None, eph_type=None): - """Generate ephemerides for 'sso' at times 'times' (assuming MJDs, with timescale self.obs_time_scale). + """Generate ephemerides for 'sso' at times 'times' + (assuming MJDs, with timescale self.obs_time_scale). - The default engine here is pyoorb, however this method could be overwritten to use another ephemeris - generator, such as ADAM. + The default engine here is pyoorb, however other ephemeris generation + could be used with a matching API to PyOrbEphemerides. - The initialized pyoorb class (PyOrbEphemerides) is saved, to skip setup on subsequent calls. + The initialized pyoorb class (PyOrbEphemerides) is saved, + to skip setup on subsequent calls. Parameters ---------- @@ -166,13 +183,15 @@ def generate_ephemerides(self, sso, times, eph_mode=None, eph_type=None): times: `np.ndarray` The times at which to generate ephemerides. MJD. eph_mode: `str` or None, optional - Potentially override default eph_mode (self.eph_mode). Must be '2body' or 'nbody'. + Potentially override default eph_mode (self.eph_mode). + Must be '2body' or 'nbody'. 
Returns
        -------
        ephs : `pd.Dataframe`
-            Results from propigating the orbit(s) to the specified times. Columns like:
-            obj_id, sedname, time, ra, dec, dradt, ddecdt, phase, solarelon, etc
+            Results from propagating the orbit(s) to the specified times.
+            Columns like:
+            obj_id, sedname, time, ra, dec, dradt, ddecdt, phase, solarelon.
        """
        if not hasattr(self, "ephems"):
            self.setup_ephemerides()
@@ -194,16 +213,19 @@ def generate_ephemerides(self, sso, times, eph_mode=None, eph_type=None):
    def calc_trailing_losses(self, velocity, seeing, texp=30.0):
        """Calculate the detection and SNR trailing losses.

-        'Trailing' losses = loss in sensitivity due to the photons from the source being
-        spread over more pixels; thus more sky background is included when calculating the
-        flux from the object and thus the SNR is lower than for an equivalent brightness
-        stationary/PSF-like source. dmagTrail represents this loss.
-
-        'Detection' trailing losses = loss in sensitivity due to the photons from the source being
-        spread over more pixels, in a non-stellar-PSF way, while source detection is (typically) done
-        using a stellar PSF filter and 5-sigma cutoff values based on assuming peaks from stellar PSF's
-        above the background; thus the SNR is lower than for an equivalent brightness stationary/PSF-like
-        source (and by a greater factor than just the simple SNR trailing loss above).
+        'Trailing' losses = loss in sensitivity due to the photons from the
+        source being spread over more pixels; thus more sky background is
+        included when calculating the flux from the object and thus the SNR
+        is lower than for an equivalent brightness stationary/PSF-like source.
+        dmagTrail represents this loss.
+
+        'Detection' trailing losses = loss in sensitivity due to the photons
+        from the source being spread over more pixels, in a non-stellar-PSF
+        way, while source detection is (typically) done using a stellar PSF
+        filter and 5-sigma cutoff values based on assuming peaks from
+        stellar PSFs above the background; thus the SNR is lower than for an
+        equivalent brightness stationary/PSF-like source (and by a greater
+        factor than just the simple SNR trailing loss above).
        dmag_detect represents this loss.

        Parameters
        ----------
        velocity : `np.ndarray` or `float`
            Velocities (deg/day).
        seeing : `np.ndarray` or `float`
            Seeing (FWHM, arcseconds).
        texp : `np.ndarray` or `float`, optional
            Exposure time (seconds).

        Returns
        -------
-        dmag Trail, dmag_detect : (`np.ndarray`, `np.ndarray`) or (`float`, `float`)
-            dmag_trail and dmag_detect for each set of velocity/seeing/texp values.
+        dmag_trail, dmag_detect : (`np.ndarray`, `np.ndarray`)
+        or (`float`, `float`)
+            dmag_trail and dmag_detect for each set of
+            velocity/seeing/texp values.
        """
        a_trail = 0.761
        b_trail = 1.162
@@ -238,10 +262,10 @@
    def read_filters(
        self,
        v_dir=None,
        v_filter="harris_V.dat",
    ):
-        """
-        Read (LSST) and Harris (V) filter throughput curves.
+        """Read (LSST) and Harris (V) filter throughput curves.

-        Only the defaults are LSST specific; this can easily be adapted for any survey.
+        Only the defaults are LSST specific;
+        this can easily be adapted for any survey.

        Parameters
        ----------
            Default set by 'LSST_THROUGHPUTS_BASELINE' env variable.
        bandpass_root : `str`, optional
            Rootname of the throughput curves in filterlist.
-            E.g. throughput curve names are bandpass_root + filterlist[i] + bandpass_suffix
-            Default total\_ (appropriate for LSST throughput repo).
+            E.g. throughput curve names are bandpass_root + filterlist[i]
+            + bandpass_suffix
+            Default `total_` (appropriate for LSST throughput repo).
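The two losses can be evaluated directly; a small sketch (instantiating BaseObs with defaults is assumed to be safe here):

    from rubin_sim.moving_objects import BaseObs

    obs = BaseObs()
    # An object moving 1.5 deg/day in 0.8" seeing, with a 30 s exposure.
    dmag_trail, dmag_detect = obs.calc_trailing_losses(1.5, 0.8, texp=30.0)
    # Both are magnitude penalties relative to a stationary PSF-like source.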
bandpass_suffix : `str`, optional
            Suffix for the throughput curves in filterlist.
            Default '.dat' (appropriate for LSST throughput repo).
            Default ('u', 'g', 'r', 'i', 'z', 'y')
        v_dir : `str`, optional
            Directory containing the V band throughput curve.
-            Default None = $SIMS_MOVINGOBJECTS_DIR/data.
+            Default None = $RUBIN_SIM_DATA_DIR/movingObjects.
        v_filter : `str`, optional
            Name of the V band filter curve. Default harris_V.dat.
@@ ...
    def calc_colors(self, sedname="C.dat", sed_dir=None):
        """Calculate the colors for a given SED.

-        If the sedname is not already in the dictionary self.colors, this reads the
-        SED from disk and calculates all V-[filter] colors for all filters in self.filterlist.
-        The result is stored in self.colors[sedname][filter], so will not be recalculated if
-        the SED + color is reused for another object.
+        If the sedname is not already in the dictionary self.colors,
+        this reads the SED from disk and calculates all V-[filter] colors
+        for all filters in self.filterlist.
+        The result is stored in self.colors[sedname][filter], so will not
+        be recalculated if the SED + color is reused for another object.

        Parameters
        ----------
        sedname : `str`, optional
            Name of the SED. Default 'C.dat'.
        sed_dir : `str`, optional
            Directory containing the SEDs of the moving objects.
-            Default None = $SIMS_MOVINGOBJECTS_DIR/data.
+            Default None = $RUBIN_SIM_DATA_DIR/movingObjects.

        Returns
        -------
-        colors : `dict`
-            Dictionary of the colors in self.filterlist for this particular Sed.
+        colors : `dict` {'filter': color}
+            Dictionary of the colors in self.filterlist.
        """
        if sedname not in self.colors:
            if sed_dir is None:
@@ ...
        return self.colors[sedname]
    def sso_in_circle_fov(self, ephems, obs_data):
-        """Determine which observations are within a circular fov for a series of observations.
+        """Determine which observations are within a circular fov
+        for a series of observations.
        Note that ephems and obs_data must be the same length.

        Parameters
        ----------
        ephems : `np.recarray`
            Ephemerides for the objects.
        obs_data : `np.recarray`
            The observation pointings.

        Returns
        -------
        indices : `np.ndarray`
-            Returns the indexes of the numpy array of the object observations which are inside the fov.
+            Returns the indexes of the numpy array of the object
+            observations which are inside the fov.
        """
        return self._sso_in_circle_fov(ephems, obs_data, self.r_fov)
@@ ...
    def _sso_in_circle_fov(self, ephems, obs_data, r_fov):
        return idx_obs
    def sso_in_rectangle_fov(self, ephems, obs_data):
-        """Determine which observations are within a rectangular FoV for a series of observations.
+        """Determine which observations are within a rectangular FoV
+        for a series of observations.
        Note that ephems and obs_data must be the same length.

        Parameters
        ----------
        ephems : `np.recarray`
            Ephemerides for the objects.
        obs_data : `np.recarray`
            The observation pointings.

        Returns
        -------
        indices : `np.ndarray`
-            Returns the indexes of the numpy array of the object observations which are inside the fov.
+            Returns the indexes of the numpy array of the object
+            observations which are inside the fov.
        """
        return self._sso_in_rectangle_fov(ephems, obs_data, self.x_tol, self.y_tol)
    def _sso_in_rectangle_fov(self, ephems, obs_data, x_tol, y_tol):
        return idx_obs
    def sso_in_camera_fov(self, ephems, obs_data):
-        """Determine which observations are within the actual camera footprint for a series of observations.
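What calc_colors does internally can be sketched with rubin_sim.phot_utils directly; the file paths and reader calls below are assumptions based on the data layout described above:

    import os
    from rubin_sim.data import get_data_dir
    from rubin_sim.phot_utils import Bandpass, Sed

    data_dir = get_data_dir()
    sed = Sed()
    sed.read_sed_flambda(os.path.join(data_dir, "movingObjects", "C.dat"))
    vband = Bandpass()
    vband.read_throughput(os.path.join(data_dir, "movingObjects", "harris_V.dat"))
    rband = Bandpass()
    rband.read_throughput(os.path.join(data_dir, "throughputs", "baseline", "total_r.dat"))
    # The V-r color for this SED, as stored in self.colors[sedname]['r'].
    v_minus_r = sed.calc_mag(vband) - sed.calc_mag(rband)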
+ """Determine which observations are within the actual + camera footprint for a series of observations. Note that ephems and obs_data must be the same length. Parameters @@ -384,7 +415,8 @@ def sso_in_camera_fov(self, ephems, obs_data): Returns ------- indices : `np.ndarray` - Returns the indexes of the numpy array of the object observations which are inside the fov. + Returns the indexes of the numpy array of the object + observations which are inside the fov. """ if not hasattr(self, "camera"): self._setup_camera() @@ -408,7 +440,8 @@ def sso_in_camera_fov(self, ephems, obs_data): return idx def sso_in_fov(self, ephems, obs_data): - """Convenience layer - determine which footprint method to apply (from self.footprint) and use it. + """Convenience layer - determine which footprint method to + apply (from self.footprint) and use it. Parameters ---------- @@ -420,7 +453,8 @@ def sso_in_fov(self, ephems, obs_data): Returns ------- indices : `np.ndarray` - Returns the indexes of the numpy array of the object observations which are inside the fov. + Returns the indexes of the numpy array of the object + observations which are inside the fov. """ if self.footprint == "camera": return self.sso_in_camera_fov(ephems, obs_data) diff --git a/rubin_sim/moving_objects/cheby_fits.py b/rubin_sim/moving_objects/cheby_fits.py index 9d94438b6..7e4d9d314 100644 --- a/rubin_sim/moving_objects/cheby_fits.py +++ b/rubin_sim/moving_objects/cheby_fits.py @@ -20,57 +20,77 @@ def three_sixty_to_neg(ra): class ChebyFits: """Generates chebyshev coefficients for a provided set of orbits. - Calculates true ephemerides using PyEphemerides, then fits these positions with a constrained - Chebyshev Polynomial, using the routines in chebyshevUtils.py. - Many chebyshev polynomials are used to fit one moving object over a given timeperiod; - typically, the length of each segment is typically about 2 days for MBAs. - The start and end of each segment must match exactly, and the entire segments must - fit into the total timespan an integer number of times. This is accomplished by setting n_decimal to - the number of decimal places desired in the 'time' value. For faster moving objects, this number needs - be greater to allow for smaller subdivisions. It's tempting to allow flexibility to the point of not - enforcing this for non-database use; however, then the resulting ephemeris may have multiple values - depending on which polynomial segment was used to calculate the ephemeris. - The length of each chebyshev polynomial is related to the number of ephemeris positions used to fit that - polynomial by ngran: + Calculates true ephemerides using PyEphemerides, then fits these + positions with a constrained Chebyshev Polynomial, using the routines + in chebyshevUtils.py. + + Many chebyshev polynomials are used to fit one moving object over + a given timeperiod; typically, the length of each segment is typically + about 2 days for MBAs. The start and end of each segment must match + exactly, and the entire segments must fit into the total timespan an + integer number of times. This is accomplished by setting n_decimal to + the number of decimal places desired in the 'time' value. + For faster moving objects, this number needs be greater to allow for + smaller subdivisions. + It's tempting to allow flexibility to the point of not + enforcing this non-overlap; however, then the resulting ephemeris + may have multiple values depending on which polynomial segment was + used to calculate the ephemeris. 
+    used to calculate the ephemeris.
+
+    The length of each chebyshev polynomial is related to the number of
+    ephemeris positions used to fit that polynomial by ngran:
    length = timestep * ngran
-    The length of each polynomial is adjusted so that the residuals in RA/Dec position
-    are less than sky_tolerance - default = 2.5mas.
-    The polynomial length (and the resulting residuals) is affected by ngran (i.e. timestep).
+    The length of each polynomial is adjusted so that the residuals in
+    RA/Dec position are less than sky_tolerance - default = 2.5mas.
+    The polynomial length (and the resulting residuals) is affected
+    by ngran (i.e. timestep).
    Default values are based on Yusra AlSayaad's work.

    Parameters
    ----------
-    orbits_obj : Orbits
+    orbits_obj : `rubin_sim.moving_objects.Orbits`
        The orbits for which to fit chebyshev polynomial coefficients.
-    t_start : float
+    t_start : `float`
        The starting point in time to fit coefficients. MJD.
-    t_span : float
-        The time span (starting at t_start) over which to fit coefficients. Days.
+    t_span : `float`
+        The time span (starting at t_start) over which to fit coefficients
+        (Days).
-    time_scale : {'TAI', 'UTC', 'TT'}
-        The timescale of the MJD time, t_start, and the time_scale that should be
-        used with the chebyshev coefficients.
+    time_scale : `str`, optional
+        One of {'TAI', 'UTC', 'TT'}
+        The timescale of the MJD time, t_start, and the time_scale
+        that should be used with the chebyshev coefficients.
-    obsCode : int, optional
-        The observatory code of the location for which to generate ephemerides. Default 807 (CTIO).
+    obscode : `str`, optional
+        The observatory code of the location for which to generate
+        ephemerides. Default I11 (Cerro Pachon).
-    sky_tolerance : float, optional
-        The desired tolerance in mas between ephemerides calculated by OpenOrb and fitted values.
+    sky_tolerance : `float`, optional
+        The desired tolerance in mas between ephemerides calculated by
+        OpenOrb and fitted values.
        Default 2.5 mas.
-    nCoeff_position : int, optional
-        The number of Chebyshev coefficients to fit for the RA/Dec positions. Default 14.
+    n_coeff_position : `int`, optional
+        The number of Chebyshev coefficients to fit for the RA/Dec positions.
+        Default 14.
-    nCoeff_vmag : int, optional
-        The number of Chebyshev coefficients to fit for the V magnitude values. Default 9.
+    n_coeff_vmag : `int`, optional
+        The number of Chebyshev coefficients to fit for the V magnitude values.
+        Default 9.
-    nCoeff_delta : int, optional
-        The number of Chebyshev coefficients to fit for the distance between Earth/Object. Default 5.
+    n_coeff_delta : `int`, optional
+        The number of Chebyshev coefficients to fit for the distance
+        between Earth/Object. Default 5.
-    nCoeff_elongation : int, optional
-        The number of Chebyshev coefficients to fit for the solar elongation. Default 5.
+    n_coeff_elongation : `int`, optional
+        The number of Chebyshev coefficients to fit for the solar
+        elongation. Default 5.
-    ngran : int, optional
-        The number of ephemeris points within each Chebyshev polynomial segment. Default 64.
+    ngran : `int`, optional
+        The number of ephemeris points within each Chebyshev
+        polynomial segment. Default 64.
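The bookkeeping in that relation is simple; a numeric sketch:

    # Each Chebyshev segment covers length = timestep * ngran days, and
    # the segment length must divide the full timespan a whole number of times.
    ngran = 64
    t_span = 30.0                 # days of ephemerides to fit
    length = 2.0                  # days per segment (MBA-like)
    timestep = length / ngran     # ephemeris spacing inside a segment
    n_segments = t_span / length  # an integer, as required
    print(timestep, n_segments)   # 0.03125 15.0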
+ eph_file : `str`, optional + The path to the JPL ephemeris file to use. + Default is '$OORB_DATA/de405.dat'. + n_decimal : `int`, optional + The number of decimal places to allow in the segment length + (and thus the times of the endpoints) can be limited to + n_decimal places. Default 10. + For LSST SIMS moving object database, this should be 13 decimal + places for NEOs and 0 for all others. """ def __init__( @@ -79,7 +99,7 @@ def __init__( t_start, t_span, time_scale="TAI", - obscode=807, + obscode="I11", sky_tolerance=2.5, n_coeff_position=14, n_coeff_vmag=9, @@ -98,13 +118,13 @@ def __init__( # And then set orbits. self._set_orbits(orbits_obj) # Save input parameters. - # We have to play some games with the start and end times, using Decimal, - # in order to get the subdivision and times to match exactly, up to n_decimal places. + # We have to play some games with the start and end times, + # using Decimal, in order to get the subdivision and times to + # match exactly, up to n_decimal places. self.n_decimal = int(n_decimal) self.t_start = round(t_start, self.n_decimal) self.t_span = round(t_span, self.n_decimal) self.t_end = round(self.t_start + self.t_span, self.n_decimal) - # print('input times', self.t_start, self.t_span, self.t_end, orbits_obj.orbits.obj_id.as_matrix()) if time_scale.upper() == "TAI": self.time_scale = "TAI" elif time_scale.upper() == "UTC": @@ -121,7 +141,7 @@ def __init__( self.n_coeff["vmag"] = int(n_coeff_vmag) self.n_coeff["elongation"] = int(n_coeff_elongation) self.ngran = int(ngran) - # Precompute multipliers (we only do this once, instead of per segment). + # Precompute multipliers (we only do this once). self._precompute_multipliers() # Initialize attributes to save the coefficients and residuals. self.coeffs = { @@ -150,7 +170,7 @@ def _set_orbits(self, orbits_obj): Parameters ---------- - orbits_obj : Orbits + orbits_obj : `rubin_sim.moving_objects.Orbits` The orbits to use to generate ephemerides. """ if not isinstance(orbits_obj, Orbits): @@ -164,7 +184,8 @@ def _precompute_multipliers(self): Calculate these once, rather than for each segment. """ # The nPoints are predetermined here, based on Yusra's earlier work. - # The weight is based on Newhall, X. X. 1989, Celestial Mechanics, 45, p. 305-310 + # The weight is based on Newhall, X. X. 1989, Celestial Mechanics, + # 45, p. 305-310 self.multipliers = {} self.multipliers["position"] = make_cheb_matrix(self.ngran + 1, self.n_coeff["position"], weight=0.16) self.multipliers["vmag"] = make_cheb_matrix_only_x(self.ngran + 1, self.n_coeff["vmag"]) @@ -172,28 +193,30 @@ def _precompute_multipliers(self): self.multipliers["elongation"] = make_cheb_matrix_only_x(self.ngran + 1, self.n_coeff["elongation"]) def _length_to_timestep(self, length): - """Convert chebyshev polynomial segment lengths to the corresponding timestep over the segment. + """Convert chebyshev polynomial segment lengths to the + corresponding timestep over the segment. Parameters ---------- - length : float + length : `float` The chebyshev polynomial segment length (nominally, days). Returns ------- - float + timestep : `float` The corresponding timestep, = length/ngran (nominally, days). """ return length / self.ngran def make_all_times(self): - """Using t_start and t_end, generate a numpy array containing times spaced at - timestep = self.length/self.ngran. - The expected use for this time array would be to generate ephemerides at each timestep. 
+        """Using t_start and t_end, generate a numpy array containing
+        times spaced at timestep = self.length/self.ngran.
+        The expected use for this time array would be to generate
+        ephemerides at each timestep.
 
         Returns
         -------
-        np.ndarray
+        times : `np.ndarray`
             Numpy array of times.
         """
         try:
@@ -209,7 +232,7 @@ def generate_ephemerides(self, times, by_object=True):
 
         Parameters
         ----------
-        times : np.ndarray
+        times : `np.ndarray`
             The times to use for ephemeris generation.
         """
         return self.pyephems.generate_ephemerides(
@@ -222,22 +245,23 @@ def generate_ephemerides(self, times, by_object=True):
         )
 
     def _round_length(self, length):
-        """Modify length, to fit in an 'integer multiple' within the t_start/t_end,
-        and to have the desired number of decimal values.
+        """Modify length, to fit in an 'integer multiple' within the
+        t_start/t_end, and to have the desired number of decimal values.
 
         Parameters
         ----------
-        length : float
+        length : `float`
             The input length value to be rounded.
 
         Returns
         -------
-        float
+        length : `float`
             The rounded length value.
         """
         length = round(length, self.n_decimal)
         length_in = length
-        # Make length an integer value within the time interval, to last decimal place accuracy.
+        # Make length an integer value within the time interval,
+        # to last decimal place accuracy.
         counter = 0
         prev_int_factor = 0
         num_tolerance = 10.0 ** (-1 * (self.n_decimal - 1))
@@ -263,13 +287,16 @@ def _test_residuals(self, length, cutoff=99):
         """Calculate the position residual, for a test case.
         Convenience function to make calcSegmentLength easier to read.
         """
-        # The pos_resid used will be the 'cutoff' percentile of all max residuals per object.
+        # The pos_resid used will be the 'cutoff' percentile of all
+        # max residuals per object.
         max_pos_resids = np.zeros(len(self.orbits_obj), float)
         timestep = self._length_to_timestep(length)
-        # Test for one segment near the start (would do at midpoint, but for long timespans
-        # this is not efficient .. a point near the start should be fine).
+        # Test for one segment near the start (would do at midpoint,
+        # but for long timespans this is not efficient ..
+        # a point near the start should be fine).
         times = np.arange(self.t_start, self.t_start + length + timestep / 2, timestep)
-        # We must regenerate ephemerides here, because the timestep is different each time.
+        # We must regenerate ephemerides here, because the timestep is
+        # different each time.
         ephs = self.generate_ephemerides(times, by_object=True)
         # Look for the coefficients and residuals.
         for i, e in enumerate(ephs):
@@ -280,17 +307,20 @@ def _test_residuals(self, length, cutoff=99):
         return pos_resid, ratio
 
     def calc_segment_length(self, length=None):
-        """Set the typical initial ephemeris timestep and segment length for all objects between t_start/t_end.
+        """Set the typical initial ephemeris timestep and segment length
+        for all objects between t_start/t_end.
 
         Sets self.length.
 
-        The segment length will fit into the time period between t_start/t_end an approximately integer
-        multiple of times, and will only have a given number of decimal places.
+        The segment length will fit into the time period between
+        t_start/t_end approximately an integer number of times,
+        and will only have a given number of decimal places.
 
         Parameters
         ----------
-        length : float, optional
-            If specified, this value for the length is used, instead of calculating it here.
+        length : `float`, optional
+            If specified, this value for the length is used,
+            instead of calculating it here.
""" # If length is specified, use it and do nothing else. if length is not None: @@ -304,40 +334,48 @@ def calc_segment_length(self, length=None): self.length = length return # Otherwise, calculate an appropriate length and timestep. - # Give a guess at a very approximate segment length, given the skyTolerance, + # Give a guess at a very approximate segment length, + # given the skyTolerance, # purposefully trying to overestimate this value. - # The actual behavior of the residuals is not linear with segment length. - # There is a linear increase at low residuals < ~2 mas / segment length < 2 days - # Then at around 2 days the residuals blow up, increasing rapidly to about 5000 mas - # (depending on orbit .. TNOs, for example, increase but only to about 300 mas, - # when the residuals resume ~linear growth out to 70 day segments if ngran=128) - # Make an arbitrary cap on segment length at 60 days, (25000 mas) ~.5 arcminute accuracy. + # The actual behavior of the residuals is not linear with + # segment length. + # There is a linear increase at low residuals + # < ~2 mas / segment length < 2 days + # Then at around 2 days the residuals blow up, + # increasing rapidly to about 5000 mas + # (depending on orbit .. TNOs, for example, increase but + # only to about 300 mas, when the residuals resume ~linear growth + # out to 70 day segments if ngran=128) + # Make an arbitrary cap on segment length at 60 days, + # (25000 mas) ~.5 arcminute accuracy. max_length = 60 max_iterations = 50 if self.sky_tolerance < 5: - # This is the cap of the low-linearity regime, looping below will refine this value. + # This is the cap of the low-linearity regime, + # looping below will refine this value. length = 2.0 elif self.sky_tolerance >= 5000: # Make a very rough guess. length = np.round((5000.0 / 20.0) * (self.sky_tolerance - 5000.0)) + 5.0 length = np.min([max_length, int(length * 10) / 10.0]) else: - # Try to pick a length somewhere in the middle of the fast increase. + # Try to pick a length in the middle of the fast increase. length = 4.0 # Tidy up some characteristics of "length": # make it fit an integer number of times into overall timespan. - # and use a given number of decimal places (easier for database storage). + # and use a given number of decimal places + # (easier for database storage). length = self._round_length(length) # Check the resulting residuals. pos_resid, ratio = self._test_residuals(length) counter = 0 - # Now should be relatively close. Start to zero in using slope around the value.ngran + # Now should be relatively close. + # Start to zero in using slope around the value.ngran while pos_resid > self.sky_tolerance and counter <= max_iterations and length > 0: length = length / 2 length = self._round_length(length) pos_resid, ratio = self._test_residuals(length) counter += 1 - # print(counter, length, pos_resid, ratio) if counter > max_iterations or length <= 0: # Add this entire segment into the failed list. for obj_id in self.orbits_obj.orbits["obj_id"].as_matrix(): @@ -352,23 +390,23 @@ def calc_segment_length(self, length=None): self.length = length def _get_coeffs_position(self, ephs): - """Calculate coefficients for the ra/dec values of a single objects ephemerides. + """Calculate coefficients for the ra/dec values of a + single objects ephemerides. Parameters ---------- - times : np.ndarray + times : `np.ndarray` The times of the ephemerides. - ephs : np.ndarray - The structured array returned by PyOrbEphemerides holding ephemeris values, for one object. 
+        ephs : `np.ndarray`
+            The structured array returned by PyOrbEphemerides
+            holding ephemeris values, for one object.
 
         Returns
         -------
-        np.ndarray
-            The ra coefficients
-        np.ndarray
-            The dec coefficients
-        float
-            The positional error residuals between fit and ephemeris values, in mas.
+        coeff_ra, coeff_dec, max_pos_resid : `np.ndarray`, `np.ndarray`,
+        `float`
+            The ra coefficients, dec coefficients, and the maximum positional
+            error residual between fit and ephemeris values, in mas.
         """
         dradt_coord = ephs["dradt"] / np.cos(np.radians(ephs["dec"]))
         coeff_ra, resid_ra, rms_ra_resid, max_ra_resid = chebfit(
@@ -393,19 +431,21 @@ def _get_coeffs_position(self, ephs):
         return coeff_ra, coeff_dec, max_pos_resid
 
     def _get_coeffs_other(self, ephs):
-        """Calculate coefficients for the ra/dec values of a single objects ephemerides.
+        """Calculate coefficients for the geo_dist, vmag, and elongation
+        values of a single object's ephemerides.
 
         Parameters
         ----------
-        ephs : np.ndarray
-            The structured array returned by PyOrbEphemerides holding ephemeris values, for one object.
+        ephs : `np.ndarray`
+            The structured array returned by PyOrbEphemerides
+            holding ephemeris values, for one object.
 
         Returns
         -------
-        dict
-            Dictionary containing the coefficients for each of 'geo_dist', 'vmag', 'elongation'
-        dict
-            Dictionary containing the max residual values for each of 'geo_dist', 'vmag', 'elongation'.
+        coeffs, max_resids : `dict`
+            Dictionary containing the coefficients for each of 'geo_dist',
+            'vmag', 'elongation', and another dictionary containing the
+            max residual values for each of 'geo_dist', 'vmag', 'elongation'.
         """
         coeffs = {}
         max_resids = {}
@@ -423,16 +463,19 @@ def _get_coeffs_other(self, ephs):
     def calc_segments(self):
         """Run the calculation of all segments over the entire time span."""
         # First calculate ephemerides for all objects, over entire time span.
-        # For some objects, we will end up recalculating the ephemeride values, but most should be fine.
+        # For some objects, we will end up recalculating the ephemeris values,
+        # but most should be fine.
         times = self.make_all_times()
         ephs = self.generate_ephemerides(times)
         eps = self._length_to_timestep(self.length) / 4.0
         # Loop through each object to generate coefficients.
         for orbit_obj, e in zip(self.orbits_obj, ephs):
             t_segment_start = self.t_start
-            # Cycle through all segments until we reach the end of the period we're fitting.
+            # Cycle through all segments until we reach the end of the
+            # period we're fitting.
             while t_segment_start < (self.t_end - eps):
-                # Identify the subset of times and ephemerides which are relevant for this segment
+                # Identify the subset of times and ephemerides
+                # which are relevant for this segment
                 # (at the default segment size).
                 t_segment_end = round(t_segment_start + self.length, self.n_decimal)
                 subset = np.where((times >= t_segment_start) & (times < t_segment_end + eps))
@@ -440,27 +483,28 @@ def calc_segments(self):
                 t_segment_start = t_segment_end
 
     def calc_one_segment(self, orbit_obj, ephs):
-        """Calculate the coefficients for a single Chebyshev segment, for a single object.
+        """Calculate the coefficients for a single Chebyshev segment,
+        for a single object.
 
-        Calculates the coefficients and residuals, and saves this information to self.coeffs,
-        self.resids, and (if there are problems), self.failed.
+        Calculates the coefficients and residuals, and saves this
+        information to self.coeffs, self.resids, and
+        (if there are problems), self.failed.
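The `_get_coeffs_*` helpers above are thin wrappers around `chebfit`; the fit/evaluate round trip can also be exercised directly. A hedged sketch with invented input data (the import path assumes the chebyshev_utils module shown later in this patch):

    import numpy as np
    from rubin_sim.moving_objects.chebyshev_utils import chebeval, chebfit

    # Fit ngran + 1 = 65 samples of a smooth function and its derivative.
    t = np.linspace(0.0, 2.0, 65)
    x = np.sin(t)
    dxdt = np.cos(t)
    coeffs, resids, rms, max_resid = chebfit(t, x, dxdt=dxdt, n_poly=7)
    # Evaluate position and velocity anywhere inside the fitted interval.
    y, v = chebeval(0.5, coeffs, interval=(t[0], t[-1]), do_velocity=True)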
Parameters ---------- - orbit_obj : Orbits + orbit_obj : `rubin_sim.moving_objects.Orbits` The single Orbits object we're fitting at the moment. - ephs : np.ndarray - The ephemerides we're fitting at the moment (for the single object / single segment). + ephs : `np.ndarray` + The ephemerides we're fitting at the moment + (for the single object / single segment). """ obj_id = orbit_obj.orbits.obj_id.iloc[0] t_segment_start = ephs["time"][0] t_segment_end = ephs["time"][-1] coeff_ra, coeff_dec, max_pos_resid = self._get_coeffs_position(ephs) if max_pos_resid > self.sky_tolerance: - # print('subdividing segments', orbit_obj.orbits.obj_id.iloc[0]) self._subdivide_segment(orbit_obj, ephs) else: - # print('working on ', orbit_obj.orbits.obj_id.iloc[0], 'at times', t_segment_start, t_segment_end) coeffs, max_resids = self._get_coeffs_other(ephs) fit_failed = False for k in max_resids: @@ -496,10 +540,11 @@ def _subdivide_segment(self, orbit_obj, ephs): Parameters ---------- - orbit_obj : Orbits + orbit_obj : `rubin_sim.moving_objects.Orbits` The single Orbits object we're fitting at the moment. - ephs : np.ndarray - The ephemerides we're fitting at the moment (for the single object / single segment). + ephs : `np.ndarray` + The ephemerides we're fitting at the moment + (for the single object / single segment). """ new_cheby = ChebyFits( orbit_obj, @@ -542,13 +587,14 @@ def write(self, coeff_file, resid_file, failed_file, append=False): Parameters ---------- - coeff_file : str + coeff_file : `str` The filename for the coefficient values. - resid_file : str + resid_file : `str` The filename for the residual values. - failed_file : str - The filename to write the failed fit information (if failed objects exist). - append : bool, optional + failed_file : `str` + The filename to write the failed fit information + (if failed objects exist). + append : `bool`, optional Flag to append (or overwrite) the output files. """ diff --git a/rubin_sim/moving_objects/cheby_values.py b/rubin_sim/moving_objects/cheby_values.py index 972c3ea78..34553b6bd 100644 --- a/rubin_sim/moving_objects/cheby_values.py +++ b/rubin_sim/moving_objects/cheby_values.py @@ -37,12 +37,14 @@ def __init__(self): def set_coefficients(self, cheby_fits): """Set coefficients using a ChebyFits object. - (which contains a dictionary of obj_id, t_start, t_end, ra, dec, delta, vmag, and elongation lists). + (which contains a dictionary of obj_id, t_start, t_end, ra, + dec, delta, vmag, and elongation lists). Parameters ---------- cheby_fits : `rubin_sim.movingObjects.chebyFits` - ChebyFits object, with attribute 'coeffs' - a dictionary of lists of coefficients. + ChebyFits object, with attribute 'coeffs' - + a dictionary of lists of coefficients. """ self.coeffs = cheby_fits.coeffs # Convert list of coefficients into numpy arrays. @@ -67,7 +69,8 @@ def read_coefficients(self, cheby_fits_file): raise IOError("Could not find cheby_fits_file at %s" % (cheby_fits_file)) # Read the coefficients file. coeffs = pd.read_table(cheby_fits_file, delim_whitespace=True) - # The header line provides information on the number of coefficients for each parameter. + # The header line provides information on the number of + # coefficients for each parameter. 
datacols = coeffs.columns.values cols = {} coeff_cols = ["ra", "dec", "geo_dist", "vmag", "elongation"] @@ -82,7 +85,8 @@ def read_coefficients(self, cheby_fits_file): self.coeffs[k] = np.empty([len(cols[k]), len(coeffs)], float) for i in range(len(cols[k])): self.coeffs[k][i] = coeffs["%s_%d" % (k, i)].values - # Add the mean RA and Dec columns (before swapping the coefficients axes). + # Add the mean RA and Dec columns + # (before swapping the coefficients axes). self.coeffs["meanRA"] = self.coeffs["ra"][0] self.coeffs["meanDec"] = self.coeffs["dec"][0] # Swap the coefficient axes so that they are [segment, coeff]. @@ -90,7 +94,8 @@ def read_coefficients(self, cheby_fits_file): self.coeffs[k] = self.coeffs[k].swapaxes(0, 1) def _eval_segment(self, segment_idx, times, subset_segments=None, mask=True): - """Evaluate the ra/dec/delta/vmag/elongation values for a given segment at a series of times. + """Evaluate the ra/dec/delta/vmag/elongation values for a + given segment at a series of times. Parameters ---------- @@ -103,14 +108,16 @@ def _eval_segment(self, segment_idx, times, subset_segments=None, mask=True): Optionally specify a subset of the total segment indexes. This lets you pick out particular obj_ids. mask : `bool`, optional - If True, returns NaNs for values outside the range of times in the segment. - If False, extrapolates segment for times outside the segment time range. + If True, returns NaNs for values outside the range of times + in the segment. + If False, extrapolates segment for times outside the + segment time range. Returns ------- ephemeris : `dict` - Dictionary of RA, Dec, delta, vmag, and elongation values for the segment indicated, - at the time indicated. + Dictionary of RA, Dec, delta, vmag, and elongation values for + the segment indicated, at the time indicated. """ if subset_segments is None: subset_segments = np.ones(len(self.coeffs["obj_id"]), dtype=bool) @@ -148,8 +155,9 @@ def _eval_segment(self, segment_idx, times, subset_segments=None, mask=True): def get_ephemerides(self, times, obj_ids=None, extrapolate=False): """Find the ephemeris information for 'obj_ids' at 'time'. - Implicit in how this is currently written is that the segments are all expected to cover the - same start/end time range across all objects. + Implicit in how this is currently written is that the segments + are all expected to cover the same start/end time range across + all objects. They do not have to have the same segment length for all objects. Parameters @@ -157,9 +165,11 @@ def get_ephemerides(self, times, obj_ids=None, extrapolate=False): times : `float` or `np.ndarray` The time to calculate ephemeris positions. obj_ids : `np.ndarray`, opt - The object ids for which to generate ephemerides. If None, then just uses all objects. + The object ids for which to generate ephemerides. + If None, then just uses all objects. extrapolate : `bool`, opt - If True, extrapolate beyond ends of segments if time outside of segment range. + If True, extrapolate beyond ends of segments if time + outside of segment range. If False, return ValueError if time is beyond range of segments. Returns @@ -205,7 +215,8 @@ def get_ephemerides(self, times, obj_ids=None, extrapolate=False): if seg_end < t: segments = np.where(self.coeffs["t_end"][obj_match] == seg_end)[0] elif seg_end == t: - # Not extrapolating, but outside the simple match case above. + # Not extrapolating, but outside the + # simple match case above. 
segments = np.where(self.coeffs["t_end"][obj_match] == seg_end)[0] for i, segmentIdx in enumerate(segments): ephemeris = self._eval_segment(segmentIdx, t, obj_match, mask=False) diff --git a/rubin_sim/moving_objects/chebyshev_utils.py b/rubin_sim/moving_objects/chebyshev_utils.py index b56e7880a..db8d5057c 100644 --- a/rubin_sim/moving_objects/chebyshev_utils.py +++ b/rubin_sim/moving_objects/chebyshev_utils.py @@ -23,20 +23,21 @@ def chebeval(x, p, interval=(-1.0, 1.0), do_velocity=True, mask=False): Parameters ---------- - x: `scalar` or `np.ndarray` + x : `scalar` or `np.ndarray` Points at which to evaluate the polynomial. - p: `np.ndarray` + p : `np.ndarray` Chebyshev polynomial coefficients, as returned by chebfit. - interval: 2-element list/tuple + interval : 2-element list/tuple Bounds the x-interval on which the Chebyshev coefficients were fit. - do_velocity: `bool` + do_velocity : `bool` If True, compute the first derivative at points x. - mask: `bool` + mask : `bool` If True, return Nans when the x goes beyond 'interval'. If False, extrapolate fit beyond 'interval' limits. + Returns ------- - y, v: `float` or `np.ndarray`, `float` or `np.ndarray` (or None) + y, v : `float` or `np.ndarray`, `float` or `np.ndarray` (or None) Y (position) and velocity values (if computed) """ if len(interval) != 2: @@ -102,37 +103,45 @@ def chebeval(x, p, interval=(-1.0, 1.0), do_velocity=True, mask=False): def make_cheb_matrix(n_points, n_poly, weight=0.16): """Compute C1^(-1)C2 using Newhall89 approach. - Utility function for fitting chebyshev polynomials to x(t) and dx/dt(t) forcing - equality at the end points. This function computes the matrix (C1^(-1)C2). - Multiplying this matrix by the x and dx/dt values to be fit produces the chebyshev - coefficient. This function need only be called once for a given polynomial degree and + Utility function for fitting chebyshev polynomials to + x(t) and dx/dt(t) forcing equality at the end points. + This function computes the matrix (C1^(-1)C2). + Multiplying this matrix by the x and dx/dt values to be fit + produces the chebyshev coefficient. + This function need only be called once for a given polynomial degree and number of points. The matrices returned are of shape(n_points+1)x(n_poly). The coefficients fitting the n_points+1 points, X, are found by: - A = xMultiplier * x + dxMultiplier * dxdt if derivative information is known, or - A = xMultiplier * x if no derivative information is known. - The xMultiplier matrices are different, depending on whether derivative information is known. + A = xMultiplier * x + dxMultiplier * dxdt + if derivative information is known, or + A = xMultiplier * x + if no derivative information is known. + The xMultiplier matrices are different, + depending on whether derivative information is known. Use function make_cheb_matrix_only_x if derviative is not known. See Newhall, X. X. 1989, Celestial Mechanics, 45, p. 305-310 for details. Parameters ---------- - n_points: `int` + n_points : `int` Number of point to be fits. Must be greater than 2. - n_poly: `int` + n_poly : `int` Number of polynomial terms. Polynomial degree + 1 - weight: `float`, optional + weight : `float`, optional Weight to allow control of relative effectos of position and velocity - values. Newhall80 found best results are obtained with velocity weighted - at 0.4 relative to position, giving W the form (1.0, 0.16, 1.0, 0.16,...) + values. 
Newhall (1989) found best results are obtained with
+        velocity weighted at 0.4 relative to position,
+        giving W the form (1.0, 0.16, 1.0, 0.16,...)
 
     Returns
     -------
     c1c2: `np.ndarray`
-        xMultiplier, C1^(-1)C2 even rows of shape (n_points+1)x(n_poly) to be multiplied by x values.
+        xMultiplier, C1^(-1)C2 even rows of shape (n_points+1)x(n_poly) to
+        be multiplied by x values.
     c1c2: `np.ndarray`
-        dxMultiplier, C1^(-1)C2 odd rows of shape (n_points+1)x(n_poly) to be multiplied by dx/dy values
+        dxMultiplier, C1^(-1)C2 odd rows of shape (n_points+1)x(n_poly) to
+        be multiplied by dx/dt values
     """
     tmat = np.zeros([n_points, n_poly])
     tdot = np.zeros([n_points, n_poly])
@@ -187,13 +196,15 @@ def make_cheb_matrix(n_points, n_poly, weight=0.16):
 def make_cheb_matrix_only_x(n_points, n_poly):
     """Compute C1^(-1)C2 using Newhall89 approach without dx/dt
 
-    Compute xMultiplier using only the equality constraint of the x-values at the endpoints.
+    Compute xMultiplier using only the equality constraint of the x-values
+    at the endpoints.
     To be used when first derivatives are not available.
     If chebyshev approximations are strung together piecewise only the x-values
     and not the first derivatives will be continuous at the boundaries.
     Multiplying this matrix by the x-values to be fit produces the chebyshev
-    coefficients. This function need only be called once for a given polynomial degree and
-    number of points. See Newhall, X. X. 1989, Celestial Mechanics, 45, p. 305-310.
+    coefficients. This function need only be called once for a given
+    polynomial degree and number of points.
+    See Newhall, X. X. 1989, Celestial Mechanics, 45, p. 305-310.
 
     Parameters
     ----------
@@ -205,7 +217,8 @@ def make_cheb_matrix_only_x(n_points, n_poly):
     Returns
     -------
     c1c2: `np.ndarray`
-        xMultiplier, Even rows of C1^(-1)C2 w/ shape (n_points+1)x(n_poly) to be multiplied by x values
+        xMultiplier, Even rows of C1^(-1)C2 w/ shape (n_points+1)x(n_poly)
+        to be multiplied by x values
     """
 
     tmat = np.zeros([n_points, n_poly])
@@ -239,7 +252,8 @@ def make_cheb_matrix_only_x(n_points, n_poly):
 
 def chebfit(t, x, dxdt=None, x_multiplier=None, dx_multiplier=None, n_poly=7):
-    """Fit Chebyshev polynomial constrained at endpoints using Newhall89 approach.
+    """Fit Chebyshev polynomial constrained at endpoints using
+    Newhall89 approach.
 
     Return Chebyshev coefficients and statistics from fit
     to array of positions (x) and optional velocities (dx/dt).
@@ -247,8 +261,9 @@ def chebfit(t, x, dxdt=None, x_multiplier=None, dx_multiplier=None, n_poly=7):
     derivative of the interpolating polynomial at the endpoints
     will be exactly equal to the input endpoint values.
     Many approximations may be piecewise strung together and the function value
-    and its first derivative will be continuous across boundaries. If derivatives
-    are not provided, only the function value will be continuous across boundaries.
+    and its first derivative will be continuous across boundaries.
+    If derivatives are not provided, only the function value will be
+    continuous across boundaries.
     If x_multiplier and dx_multiplier are not provided or are an inappropriate
     shape for t and x, they will be recomputed.
@@ -280,7 +295,8 @@ def chebfit(t, x, dxdt=None, x_multiplier=None, dx_multiplier=None, n_poly=7):
     a_n : `np.ndarray`
         Array of chebyshev coefficients with length=n_poly.
     residuals : `np.ndarray`
-        Array of residuals of the tabulated function x minus the approximated function.
+        Array of residuals of the tabulated function x minus the
+        approximated function.
     rms : `float`
         The rms of the residuals in the fit.
     maxresid : `float`
diff --git a/rubin_sim/moving_objects/direct_obs.py b/rubin_sim/moving_objects/direct_obs.py
index d3e361e3f..ec2a3adaf 100644
--- a/rubin_sim/moving_objects/direct_obs.py
+++ b/rubin_sim/moving_objects/direct_obs.py
@@ -12,50 +12,64 @@
 class DirectObs(BaseObs):
     """
-    Generate observations of a set of moving objects: exact ephemeris at the times of each observation.
+    Generate observations of a set of moving objects:
+    exact ephemeris at the times of each observation.
 
-    First generates observations on a rough grid and looks for observations within a specified tolerance
-    of the actual observations; for the observations which pass this cut, generates a precise ephemeris
-    and checks if the object is within the FOV.
+    First generates observations on a rough grid and looks for
+    observations within a specified tolerance
+    of the actual observations; for the observations which pass this cut,
+    generates a precise ephemeris and checks if the object is within the FOV.
 
     Parameters
     ----------
-    footprint: `str`, optional
-        Specify the footprint for the FOV. Options include "camera", "circle", "rectangle".
-        'Camera' means use the actual LSST camera footprint (following a rough cut with a circular FOV).
+    footprint : `str`, optional
+        Specify the footprint for the FOV.
+        Options include "camera", "circle", "rectangle".
+        'Camera' means use the actual LSST camera footprint
+        (following a rough cut with a circular FOV).
         Default is circular FOV.
     r_fov : `float`, optional
         If footprint is "circular", this is the radius of the fov (in degrees).
         Default 1.75 degrees.
     x_tol : `float`, optional
-        If footprint is "rectangular", this is half of the width of the (on-sky) fov in the RA
-        direction (in degrees).
-        Default 5 degrees. (so size of footprint in degrees will be 10 degrees in the RA direction).
+        If footprint is "rectangle", this is half of the width of
+        the (on-sky) fov in the RA direction (in degrees).
+        Default 5 degrees.
     y_tol : `float`, optional
-        If footprint is "rectangular", this is half of the width of the fov in Declination (in degrees).
-        Default is 3 degrees (so size of footprint in degrees will be 6 degrees in the Dec direction).
+        If footprint is "rectangle", this is half of the width of
+        the fov in Declination (in degrees).
+        Default is 3 degrees.
     eph_mode: `str`, optional
         Mode for ephemeris generation - nbody or 2body. Default is nbody.
     prelim_eph_mode: str, optional
-        Mode for preliminary ephemeris generation, if any is done. Default is 2body.
+        Mode for preliminary ephemeris generation, if any is done.
+        Default is 2body.
     eph_type: `str`, optional
         Type of ephemerides to generate - full or basic.
-        Full includes all values calculated by openorb; Basic includes a more basic set.
-        Default is Basic. (this includes enough information for most standard MAF metrics).
+        Full includes all values calculated by openorb;
+        Basic includes a more basic set.
+        Default is Basic.
+        (this includes enough information for most standard MAF metrics).
     eph_file: `str` or None, optional
-        The name of the planetary ephemerides file to use for ephemeris generation.
-        Default (None) will use the default for PyOrbEphemerides.
+        The name of the planetary ephemerides file to use in ephemeris
+        generation. Default (None) will use the default for PyOrbEphemerides.
     obs_code: `str`, optional
-        Observatory code for ephemeris generation. Default is "I11" - Cerro Pachon.
+        Observatory code for ephemeris generation.
+        Default is "I11" - Cerro Pachon.
     obs_time_col: `str`, optional
-        Name of the time column in the obsData. Default 'observationStartMJD'.
+        Name of the time column in the obsData.
+        Default 'observationStartMJD'.
     obs_time_scale: `str`, optional
-        Type of timescale for MJD (TAI or UTC currently). Default TAI.
+        Type of timescale for MJD (TAI or UTC currently).
+        Default TAI.
     seeing_col: `str`, optional
-        Name of the seeing column in the obsData. Default 'seeingFwhmGeom'.
-        This should be the geometric/physical seeing as it is used for the trailing loss calculation.
+        Name of the seeing column in the obsData.
+        Default 'seeingFwhmGeom'.
+        This should be the geometric/physical seeing as it is used
+        for the trailing loss calculation.
     visit_exp_time_col: `str`, optional
-        Name of the visit exposure time column in the obsData. Default 'visitExposureTime'.
+        Name of the visit exposure time column in the obsData.
+        Default 'visitExposureTime'.
     obs_ra: `str`, optional
         Name of the RA column in the obsData. Default 'fieldRA'.
     obs_dec: `str`, optional
@@ -63,20 +77,22 @@ class DirectObs(BaseObs):
     obs_rot_sky_pos: `str`, optional
         Name of the Rotator column in the obsData. Default 'rotSkyPos'.
     obs_degrees: `bool`, optional
-        Whether the observational data is in degrees or radians. Default True (degrees).
+        Whether the observational data is in degrees or radians.
+        Default True (degrees).
     outfile_name : `str`, optional
         The output file name.
         Default is 'lsst_obs.dat'.
     obs_info : `str`, optional
         A string that captures provenance information about the observations.
-        For example: 'baseline_v2.0_10yrs, MJD 59853-61677' or 'baseline2018a minus NES'
+        For example: 'baseline_v2.0_10yrs, MJD 59853-61677'
+        or 'baseline2018a minus NES'.
         Default ''.
     tstep: `float`, optional
         The time between initial (rough) ephemeris generation points, in days.
         Default 1 day.
     rough_tol: `float`, optional
-        The initial rough tolerance value for positions, used as a first cut to identify potential
-        observations (in degrees).
+        The initial rough tolerance value for positions, used as a first
+        cut to identify potential observations (in degrees).
         Default 10 degrees.
     pre_comp_tol : float (2.08)
         The radial tolerance to add when using pre-computed orbits. Should be
@@ -148,22 +164,27 @@ def __init__(
     def run(self, orbits, obs_data, object_positions=None, object_mjds=None):
         """Find and write the observations of each object to disk.
 
-        For each object, generate a very rough grid of ephemeris points (typically using 2body integration).
-        Then identify pointings in obs_data which are within
+        For each object, a rough grid of ephemeris points is either
+        generated on the fly or read from a pre-computed grid.
+        If the rough grid indicates that an object may be present
+        in an observation, then a more precise position is generated
+        for the time of the observation.
+
         Parameters
         ----------
         orbits : `rubin_sim.moving_objects.Orbits`
-            The orbits to generate ephemerides for.
+            The orbits for which to generate ephemerides.
         obs_data : `np.ndarray`
             The simulated pointing history data.
         object_positions : `np.ndarray`
-            Pre-computed RA,dec positions for each object in orbits (degrees)
+            Pre-computed RA,dec positions for each object in orbits (degrees).
         object_mjds : `np.ndarray`
-            MJD values for each pre-computed position
+            MJD values for each pre-computed position.
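A hypothetical invocation of `DirectObs.run`, using the parameters documented above; the orbit file name is invented, and the stand-in pointing array only carries the default columns named in this docstring (a real run would load an opsim database and may need additional columns, such as the filter):

    import numpy as np
    from rubin_sim.moving_objects import DirectObs, Orbits

    orbits = Orbits()
    orbits.read_orbits("my_neos.des")  # hypothetical orbit file

    obs_data = np.zeros(
        2,
        dtype=[
            ("observationStartMJD", float),
            ("fieldRA", float),
            ("fieldDec", float),
            ("rotSkyPos", float),
            ("seeingFwhmGeom", float),
            ("visitExposureTime", float),
        ],
    )
    obs_data["observationStartMJD"] = [60676.1, 60676.2]

    d_obs = DirectObs(footprint="circle", r_fov=1.75, outfile_name="lsst_obs.dat")
    d_obs.run(orbits, obs_data)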
""" - # If we are trying to use pre-computed positions, check that the MJDs span enough + # If we are trying to use pre-computed positions, + # check that the MJDs span the necessary time. if object_mjds is not None: if (obs_data[self.obs_time_col].min() < object_mjds.min()) | ( obs_data[self.obs_time_col].max() > object_mjds.max() @@ -241,7 +262,8 @@ def run(self, orbits, obs_data, object_positions=None, object_mjds=None): result = [] # save indx to match observation indx to object indx indx_map_visit_to_object = [] - # For each object, identify observations where the object is within the FOV (or camera footprint). + # For each object, identify observations where the object is + # within the FOV (or camera footprint). for i, sso in enumerate(orbits): objid = sso.orbits["obj_id"].iloc[0] sedname = sso.orbits["sed_filename"].iloc[0] @@ -273,7 +295,8 @@ def run(self, orbits, obs_data, object_positions=None, object_mjds=None): ephs_idxs = np.searchsorted(ephs["time"], obs_data[self.obs_time_col]) rough_idx_obs = self._sso_in_circle_fov(ephs[ephs_idxs], obs_data, self.rough_tol) else: - # Nearest neighbor search for the object_mjd closest to obs_data mjd + # Nearest neighbor search for the object_mjd closest to + # obs_data mjd pos = np.searchsorted(object_mjds, obs_data[self.obs_time_col], side="left") pos_right = pos - 1 object_indx = pos + 0 @@ -303,7 +326,7 @@ def run(self, orbits, obs_data, object_positions=None, object_mjds=None): ("%d/%d id=%s : " % (i, len(orbits), objid)) + datetime.datetime.now().strftime("Exact end: %Y-%m-%d %H:%M:%S") ) - # Identify the objects which fell within the specific footprint. + # Identify the objects which fell within the footprint. idx_obs = self.sso_in_fov(ephs, obs_data[rough_idx_obs]) if self.verbose: logging.info( diff --git a/rubin_sim/moving_objects/make_lsst_obs.py b/rubin_sim/moving_objects/make_lsst_obs.py index cc4e5c6cc..cc9375a9c 100755 --- a/rubin_sim/moving_objects/make_lsst_obs.py +++ b/rubin_sim/moving_objects/make_lsst_obs.py @@ -23,7 +23,8 @@ def setup_args(parser=None): Parameters ---------- parser: argparse.ArgumentParser, optional - Generally left at the default (None), but a user could set up their own parser if desired. + Generally left at the default (None), but a user could set up + their own parser if desired. Returns ------- diff --git a/rubin_sim/moving_objects/ooephemerides.py b/rubin_sim/moving_objects/ooephemerides.py index 0f375e4a1..96e031785 100644 --- a/rubin_sim/moving_objects/ooephemerides.py +++ b/rubin_sim/moving_objects/ooephemerides.py @@ -34,17 +34,17 @@ def get_oorb_data_dir(): class PyOrbEphemerides: - """Generate ephemerides and propagate orbits using the python interface to Oorb. + """Generate ephemerides and propagate orbits, + using the python interface to Oorb. Typical usage: - pyephs = PyOrbEphemerides() - # Set the orbital parameters, using an lsst.sims.movingObjects.Orbits object - pyephs.setOrbits(orbits) - # Generate ephemerides at times 'times'. - ephs = pyephs.generateEphemerides(times, timeScale='UTC', obscode='I11') - This class handles the packing and unpacking of the fortran style arrays that - pyoorb uses, to and from more user-friendly pandas arrays. + >>> pyephs = PyOrbEphemerides() + >>> pyephs.setOrbits(orbits) + >>> ephs = pyephs.generateEphemerides(times, timeScale, obscode) + + PyOrbEphemerides handles the packing and unpacking of the fortran style + arrays that pyoorb uses, to and from more user-friendly pandas arrays. 
     Parameters
     ----------
@@ -74,7 +74,8 @@ def _init_oorb(self):
     def set_orbits(self, orbit_obj):
         """Set the orbits, to be used to generate ephemerides.
 
-        Immediately calls self._convertOorbElem to translate to the 'packed' oorb format.
+        Immediately calls self._convert_to_oorb_elem to translate to the
+        'packed' oorb format.
 
         Parameters
         ----------
@@ -86,27 +87,33 @@ def set_orbits(self, orbit_obj):
         self._convert_to_oorb_elem(orbit_obj.orbits, orbit_obj.orb_format)
 
     def _convert_to_oorb_elem(self, orbit_dataframe, orb_format):
-        """Convert orbital elements into the numpy fortran-format array OpenOrb requires.
+        """Convert orbital elements into the numpy fortran-format
+        array OpenOrb requires.
 
-        The OpenOrb element format is a single array with elemenets:
+        The OpenOrb element format is a single array with elements:
         0 : orbitId (cannot be a string)
         1-6 : orbital elements, using radians for angles
-        7 : element 'type' code (1 = CAR, 2 = COM, 3 = KEP, 4 = DELauny, 5 = EQX (equinoctial))
+        7 : element 'type' code
+            (1 = CAR, 2 = COM, 3 = KEP, 4 = Delaunay, 5 = EQX (equinoctial))
         8 : epoch
-        9 : timescale for epoch (1 = UTC, 2 = UT1, 3 = TT, 4 = TAI : always assumes TT)
+        9 : timescale for epoch
+            (1 = UTC, 2 = UT1, 3 = TT, 4 = TAI : always assumes TT)
         10 : magHv
         11 : g
 
-        Sets self.oorb_elem, the orbit parameters in an array formatted for OpenOrb.
+        Sets self.oorb_elem, the orbit parameters in an array
+        formatted for OpenOrb.
         """
         oorb_elem = np.zeros([len(orbit_dataframe), 12], dtype=np.double, order="F")
-        # Put in simple values for objid, or add method to test if any obj_id is a string.
+        # Put in simple values for objid, or add method to test if
+        # any obj_id is a string.
         # NOTE THAT THIS MEANS WE'VE LOST THE OBJID
         oorb_elem[:, 0] = np.arange(0, len(orbit_dataframe), dtype=int) + 1
         # Add the appropriate element and epoch types:
         oorb_elem[:, 7] = np.zeros(len(orbit_dataframe), float) + self.elem_type[orb_format]
         oorb_elem[:, 9] = np.zeros(len(orbit_dataframe), float) + self.time_scales["TT"]
-        # Convert other elements INCLUDING converting inclination, node, argperi to RADIANS
+        # Convert other elements INCLUDING converting inclination,
+        # node, argperi to RADIANS
         if orb_format == "KEP":
             oorb_elem[:, 1] = orbit_dataframe["a"]
             oorb_elem[:, 2] = orbit_dataframe["e"]
@@ -137,17 +144,14 @@ def _convert_to_oorb_elem(self, orbit_dataframe, orb_format):
         self.orb_format = orb_format
 
     def convert_from_oorb_elem(self):
-        """Translate pyoorb-style orbital element array back into dataframe.
-
-        Parameters
-        ----------
-        oorbElem : `np.ndarray`
-            The orbital elements in OpenOrb format.
+        """Translate pyoorb-style (fortran packed) orbital element array
+        into a pandas dataframe. Operates on self.oorb_elem.
 
         Returns
         -------
         new_orbits : `pd.DataFrame`
-            A DataFrame with the appropriate subset of columns relating to orbital elements.
+            A DataFrame with the appropriate subset of columns
+            relating to orbital elements.
         """
         if self.orb_format == "KEP":
             new_orbits = pd.DataFrame(
@@ -215,12 +219,15 @@ def convert_from_oorb_elem(self):
         del new_orbits["elem_type"]
         del new_orbits["epoch_type"]
         del new_orbits["oorbId"]
-        # To incorporate with original Orbits object, need to swap back to original obj_ids
-        # as well as put back in original SEDs.
+        # To incorporate with original Orbits object, need to swap
+        # back to original obj_ids as well as put back in original SEDs.
         return new_orbits
 
     def convert_orbit_format(self, orb_format="CAR"):
-        """Convert orbital elements from the format in orbitObj into 'format'.
+        """Convert the orbital elements in self.oorb_elem into `orb_format`.
+
+        For example, convert from the current format (such as KEP)
+        into `orb_format` (such as CAR).
 
         Parameters
         ----------
@@ -238,14 +245,16 @@ def convert_orbit_format(self, orb_format="CAR"):
         return
 
     def _convert_times(self, times, time_scale="UTC"):
-        """Generate an oorb-format array of the times desired for the ephemeris generation.
+        """Generate an oorb-format array of the times desired for the
+        ephemeris generation.
 
         Parameters
         ----------
         times : `np.ndarray` or `float`
             The ephemeris times (MJD) desired
         time_scale : `str`, optional
-            The timescale (UTC, UT1, TT, TAI) of the ephemeris MJD values. Default = UTC, MJD.
+            The timescale (UTC, UT1, TT, TAI) of the ephemeris MJD values.
+            Default = UTC, MJD.
 
         Returns
         -------
@@ -271,7 +280,8 @@ def _generate_oorb_ephs_full(self, eph_times, obscode="I11", eph_mode="N"):
         ephtimes : `np.ndarray`
             Ephemeris times in oorb format (see self.convertTimes)
         obscode : `int` or `str`, optional
-            The observatory code for ephemeris generation. Default=I11 (Cerro Pachon).
+            The observatory code for ephemeris generation.
+            Default=I11 (Cerro Pachon).
 
         Returns
         -------
@@ -289,10 +299,11 @@ def _generate_oorb_ephs_full(self, eph_times, obscode="I11", eph_mode="N"):
         return oorb_ephems
 
     def _convert_oorb_ephs_full(self, oorb_ephs, by_object=True):
-        """Converts oorb ephemeris array to numpy recarray, with labeled columns.
+        """Converts oorb ephemeris array to np.ndarray.
 
-        The oorb ephemeris array is a 3-d array organized as: (object / times / eph@time)
-        [objid][time][ephemeris information @ that time] with ephemeris elements
+        The oorb ephemeris array is a 3-d array organized as:
+        (object / times / eph@time)
+        [objid][time][ephemeris information @ that time] with elements
         ! (1) modified julian date
         ! (2) right ascension (deg)
         ! (3) declination (deg)
@@ -318,31 +329,35 @@ def _convert_oorb_ephs_full(self, oorb_ephs, by_object=True):
         ! (23) lunar phase [0...1]
         ! (24) lunar elongation (deg, distance between the target and the Moon)
         ! (25) heliocentric ecliptic cartesian x coordinate for the object (au)
-        ! (26) heliocentric ecliptic cartesian y coordinate for the object (au)
-        ! (27) heliocentric ecliptic cartesian z coordinate for the objects (au)
-        ! (28) heliocentric ecliptic cartesian x rate for the object (au/day))
-        ! (29) heliocentric ecliptic cartesian y rate for the object (au/day)
-        ! (30) heliocentric ecliptic cartesian z rate for the objects (au/day)
-        ! (31) heliocentric ecliptic cartesian coordinates for the observatory (au)
-        ! (32) heliocentric ecliptic cartesian coordinates for the observatory (au)
-        ! (33) heliocentric ecliptic cartesian coordinates for the observatory (au)
+        ! (26) helio ecliptic cartesian y coordinate for the object (au)
+        ! (27) helio ecliptic cartesian z coordinate for the object (au)
+        ! (28) helio ecliptic cartesian x rate for the object (au/day)
+        ! (29) helio ecliptic cartesian y rate for the object (au/day)
+        ! (30) helio ecliptic cartesian z rate for the object (au/day)
+        ! (31) helio ecliptic cartesian coordinates for the observatory (au)
+        ! (32) helio ecliptic cartesian coordinates for the observatory (au)
+        ! (33) helio ecliptic cartesian coordinates for the observatory (au)
         ! (34) true anomaly (currently only a dummy value)
 
-        Here we convert to a numpy recarray, grouped either by object (default)
+        Here we convert to a numpy.ndarray, grouped either by object (default)
         or by time (if by_object=False).
- The resulting numpy recarray is composed of columns (of each ephemeris element), - where each column is 2-d array with first axes either 'object' or 'time'. + The resulting array is composed of columns (of each ephemeris element), + where each column is 2-d array with first axes either 'object' + or 'time'. - if by_object = True : [ephemeris elements][object][time] - (i.e. the 'ra' column = 2-d array, where the [0] axis (length) equals the number of ephTimes) + (i.e. the 'ra' column = 2-d array, where the [0] axis (length) + equals the number of ephTimes) - if by_object = False : [ephemeris elements][time][object] - (i.e. the 'ra' column = 2-d arrays, where the [0] axis (length) equals the number of objects) + (i.e. the 'ra' column = 2-d arrays, where the [0] axis (length) + equals the number of objects) Parameters ---------- oorb_ephs : `np.ndarray` The oorb-formatted ephemeris values by_object : `bool`, optional - If True (default), resulting converted ephemerides are grouped by object. + If True (default), resulting converted ephemerides are grouped + by object. If False, resulting converted ephemerides are grouped by time. Returns @@ -408,7 +423,8 @@ def _generate_oorb_ephs_basic(self, eph_times, obscode="I11", eph_mode="N"): ephtimes : `np.ndarray` Ephemeris times in oorb format (see self.convertTimes). obscode : `int` or `str`, optional - The observatory code for ephemeris generation. Default=I11 (Cerro Pachon). + The observatory code for ephemeris generation. + Default=I11 (Cerro Pachon). Returns ------- @@ -426,10 +442,13 @@ def _generate_oorb_ephs_basic(self, eph_times, obscode="I11", eph_mode="N"): return oorb_ephems def _convert_oorb_ephs_basic(self, oorb_ephs, by_object=True): - """Converts oorb ephemeris array to numpy recarray, with labeled columns. + """Converts oorb ephemeris array to numpy recarray, + with labeled columns. - The oorb ephemeris array is a 3-d array organized as: (object / times / eph@time) - [objid][time][ephemeris information @ that time] with ephemeris elements + The oorb ephemeris array is a 3-d array organized as: + (object / times / eph@time) + [objid][time][ephemeris information @ that time] with ephemeris + elements ! (1) modified julian date ! (2) right ascension (deg) ! (3) declination (deg) @@ -442,21 +461,25 @@ def _convert_oorb_ephs_basic(self, oorb_ephs, by_object=True): ! (10) predicted apparent V-band magnitude ! (11) true anomaly (currently only a dummy value) - Here we convert to a numpy recarray, grouped either by object (default) + Here we convert to a numpy array, grouped either by object (default) or by time (if by_object=False). - The resulting numpy recarray is composed of columns (of each ephemeris element), - where each column is 2-d array with first axes either 'object' or 'time'. + The resulting array is composed of columns (of each ephemeris element), + where each column is 2-d array with first axes either 'object' + or 'time'. - if by_object = True : [ephemeris elements][object][time] - (i.e. the 'ra' column = 2-d array, where the [0] axis (length) equals the number of ephTimes) + (i.e. the 'ra' column = 2-d array, where the [0] axis (length) + equals the number of ephTimes) - if by_object = False : [ephemeris elements][time][object] - (i.e. the 'ra' column = 2-d arrays, where the [0] axis (length) equals the number of objects) + (i.e. 
the 'ra' column = 2-d arrays, where the [0] axis (length)
+        equals the number of objects)
 
         Parameters
         ----------
         oorb_ephs : `np.ndarray`
             The oorb-formatted ephemeris values
         by_object : `bool`, optional
-            If True (default), resulting converted ephemerides are grouped by object.
+            If True (default), resulting converted ephemerides are grouped
+            by object.
             If False, resulting converted ephemerides are grouped by time.
 
         Returns
@@ -502,34 +525,40 @@ def generate_ephemerides(
     ):
         """Calculate ephemerides for all orbits at times `times`.
 
-        This is a public method, wrapping self._convert_times, self._generateOorbEphs
-        and self._convertOorbEphs (which include dealing with oorb-formatting of arrays).
+        The returned ephemerides are a numpy array that can be grouped
+        by object or by time.
 
-        The return ephemerides are in a numpy recarray, with axes
-        - if by_object = True : [ephemeris values][object][@time]
-        (i.e. the 'ra' column = 2-d array, where the [0] axis (length) equals the number of eph_times)
-        - if by_object = False : [ephemeris values][time][@object]
-        (i.e. the 'ra' column = 2-d arrays, where the [0] axis (length) equals the number of objects)
+        If they are grouped by object (by_object=True), the array
+        is organized as `ephemeris_values[object][time]`.
+        Here the "ra" column is a 2-d array where the [0] axis
+        length equals the number of ephemeris times.
 
-        The ephemeris values returned to the user (== columns of the recarray) are:
-        ['delta', 'ra', 'dec', 'magV', 'time', 'dradt', 'ddecdt', 'phase', 'solarelon', 'velocity']
-        where positions/angles are all in degrees, velocities are deg/day, and delta is the
-        distance between the Earth and the object in AU.
+        If they are grouped by time (by_object=False), the array
+        is organized as `ephemeris_values[time][object]`.
+        Here the "ra" column is a 2-d array where the [0] axis length
+        equals the number of objects.
+
+        All returned positions and angles are in degrees, velocities
+        are degrees/day and distances are in AU.
 
         Parameters
         ----------
-        ephtimes : `np.ndarray`
-            Ephemeris times in oorb format (see self.convertTimes)
+        times : `np.ndarray`, (N,)
+            Ephemeris times.
+        time_scale : `str`, optional
+            Time scale (UTC, TT, TAI) of times.
         obscode : `int` or `str`, optional
-            The observatory code for ephemeris generation. Default=807 (Cerro Tololo).
+            The observatory code for ephemeris generation.
         by_object : `bool`, optional
-            If True (default), resulting converted ephemerides are grouped by object.
+            If True (default), resulting converted ephemerides are
+            grouped by object.
            If False, resulting converted ephemerides are grouped by time.
         eph_mode : `str`, optional
            Dynamical model to use for ephemeris generation - nbody or 2body.
            Accepts 'nbody', '2body', 'N' or '2'. Default nbody.
         eph_type : `str`, optional
-            Generate full (more data) ephemerides or basic (less data) ephemerides.
+            Generate full (more data) ephemerides or basic (less data)
+            ephemerides.
             Default basic.
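The two groupings described above differ only in which axis comes first; continuing the hypothetical `pyephs`/`times` session from earlier:

    # Grouped by object: each column is indexed [object][time].
    ephs = pyephs.generate_ephemerides(times, by_object=True)
    ra_first_object = ephs["ra"][0]    # all times, first object

    # Grouped by time: each column is indexed [time][object].
    ephs_t = pyephs.generate_ephemerides(times, by_object=False)
    ra_first_time = ephs_t["ra"][0]    # all objects, at times[0]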
Returns @@ -547,7 +576,6 @@ def generate_ephemerides( # t = time.time() eph_times = self._convert_times(times, time_scale=time_scale) if eph_type.lower() == "basic": - # oorb_ephs = self._generate_oorb_ephs_basic(eph_times, obscode=obscode, eph_mode=eph_mode) oorb_ephs, err = oo.pyoorb.oorb_ephemeris_basic( in_orbits=self.oorb_elem, in_obscode=obscode, @@ -561,7 +589,8 @@ def generate_ephemerides( else: raise ValueError("eph_type must be full or basic") # dt, t = dtime(t) - # logging.debug("# Calculating ephemerides for %d objects over %d times required %f seconds" + # logging.debug("# Calculating ephemerides for %d objects over %d times + # required %f seconds" # % (len(self.oorb_elem), len(times), dt)) return ephs diff --git a/rubin_sim/moving_objects/orbits.py b/rubin_sim/moving_objects/orbits.py index 1ee0556a3..08c94682e 100644 --- a/rubin_sim/moving_objects/orbits.py +++ b/rubin_sim/moving_objects/orbits.py @@ -7,12 +7,12 @@ class Orbits: - """Orbits reads, checks for required values, and stores orbit parameters for moving objects. - - Instantiate the class and then use read_orbits or set_orbits to set the orbit values. + """Orbits reads, checks for required values, and stores orbit + parameters for moving objects. self.orbits stores the orbital parameters, as a pandas dataframe. - self.dataCols defines the columns required, although obj_id, H, g, and sed_filename are optional. + self.dataCols defines the columns required, + although obj_id, H, g, and sed_filename are optional. """ def __init__(self): @@ -98,11 +98,15 @@ def set_orbits(self, orbits): """Set and validate orbital parameters contain all required values. Sets self.orbits and self.orb_format. - If objid is not present in orbits, a sequential series of integers will be used. - If H is not present in orbits, a default value of 20 will be used. - If g is not present in orbits, a default value of 0.15 will be used. - If sed_filename is not present in orbits, either C or S type will be assigned, - according to the semi-major axis value. + If objid is not present in orbits, + a sequential series of integers will be used. + If H is not present in orbits, + a default value of 20 will be used. + If g is not present in orbits, + a default value of 0.15 will be used. + If sed_filename is not present in orbits, + either C or S type will be assigned according to the + semi-major axis value. Parameters ---------- @@ -117,10 +121,11 @@ def set_orbits(self, orbits): # Passed a numpy array, convert to DataFrame. orbits = pd.DataFrame.from_records(orbits) elif isinstance(orbits, np.record): - # This was a single object in a numpy array and we should be a bit fancy. + # This was a single object in a numpy array orbits = pd.DataFrame.from_records([orbits], columns=orbits.dtype.names) elif isinstance(orbits, pd.DataFrame): - # This was a pandas dataframe .. but we probably want to drop the index and recount. + # This was a pandas dataframe .. + # but we probably want to drop the index and recount. orbits.reset_index(drop=True, inplace=True) if "index" in orbits: @@ -128,7 +133,8 @@ def set_orbits(self, orbits): n_sso = len(orbits) - # Error if orbits is empty (this avoids hard-to-interpret error messages from pyoorb). + # Error if orbits is empty + # (this avoids hard-to-interpret error messages from pyoorb). 
        if n_sso == 0:
            raise ValueError("Length of the orbits dataframe was 0.")
 
@@ -138,7 +144,8 @@ def set_orbits(self, orbits):
             if ~(orbits["FORMAT"] == orbits["FORMAT"].iloc[0]).all():
                 raise ValueError("All orbital elements in the set should have the same FORMAT.")
             self.orb_format = orbits["FORMAT"].iloc[0]
-            # Backwards compatibility .. a bit. CART is deprecated, so swap it to CAR.
+            # Backwards compatibility .. a bit.
+            # CART is deprecated, so swap it to CAR.
             if self.orb_format == "CART":
                 self.orb_format = "CAR"
             del orbits["FORMAT"]
@@ -167,9 +174,11 @@ def set_orbits(self, orbits):
                     "with columns: \n%s" % orbits.columns
                 )
 
-        # Check that the orbit epoch is within a 'reasonable' range, to detect possible column mismatches.
+        # Check that the orbit epoch is within a 'reasonable' range,
+        # to detect possible column mismatches.
         general_epoch = orbits["epoch"].head(1).values[0]
-        # Look for epochs between 1800 and 2200 - this is primarily to check if people used MJD (and not JD).
+        # Look for epochs between 1800 and 2200 -
+        # this is primarily to check if people used MJD (and not JD).
         expect_min_epoch = -21503.0
         expect_max_epoch = 124594.0
         if general_epoch < expect_min_epoch or general_epoch > expect_max_epoch:
@@ -179,7 +188,8 @@ def set_orbits(self, orbits):
                 % (general_epoch, expect_min_epoch, expect_max_epoch)
             )
 
-        # If these columns are not available in the input data, auto-generate them.
+        # If these columns are not available in the input data,
+        # auto-generate them.
         if "obj_id" not in orbits:
             obj_id = np.arange(0, n_sso, 1)
             orbits = orbits.assign(obj_id=obj_id)
@@ -205,9 +215,11 @@ def set_orbits(self, orbits):
         # All is good.
         self.orbits = orbits
 
-    def assign_sed(self, orbits, random_seed=None):
-        """Assign either a C or S type SED, depending on the semi-major axis of the object.
+    @staticmethod
+    def assign_sed(orbits, random_seed=None):
+        """Assign either a C or S type SED,
+        depending on the semi-major axis of the object.
 
         P(C type) = 0 (a<2); 0.5*a - 1 (2<a<4); 1 (a>4), based on figure 23 from Ivezic et al 2001 (AJ, 122, 2749).
 
@@ -232,7 +244,7 @@ def assign_sed(self, orbits, random_seed=None):
         elif "q" in orbits:
             a = orbits["q"] / (1 - orbits["e"])
         elif "x" in orbits:
-            # This definitely isn't right, but it's a placeholder to make it work for now.
+            # This isn't right, but it's a placeholder to make it work for now.
             a = np.sqrt(orbits["x"] ** 2 + orbits["y"] ** 2 + orbits["z"] ** 2)
         else:
             raise ValueError("Need either a or q (plus e) in orbit data frame.")
@@ -250,56 +262,71 @@ def assign_sed(self, orbits, random_seed=None):
         return sedvals
 
     def read_orbits(self, orbit_file, delim=None, skiprows=None):
-        """Read orbits from a file, generating a pandas dataframe containing columns matching dataCols,
-        for the appropriate orbital parameter format (currently accepts COM, KEP or CAR formats).
+        """Read orbits from a file.
+
+        This generates a pandas dataframe containing columns matching dataCols,
+        for the appropriate orbital parameter format
+        (currently accepts COM, KEP or CAR formats).
 
-        After reading and standardizing the column names, calls self.set_orbits to validate the
-        orbital parameters. Expects angles in orbital element formats to be in degrees.
+        After reading and standardizing the column names,
+        calls self.set_orbits to validate the
+        orbital parameters.
+        Expects angles in orbital element formats to be in degrees.
 
-        Note that readOrbits uses pandas.read_csv to read the data file with the orbital parameters.
+        Note that read_orbits uses pandas.read_csv to read the data file
+        with the orbital parameters.
Thus, it should have column headers specifying the column names .. unless skiprows = -1 or there is just no header line at all. - in which case it is assumed to be a standard DES format file, with no header line. + in which case it is assumed to be a standard DES format file, + with no header line. Parameters ---------- orbit_file : `str` - The name of the input file containing orbital parameter information. + The name of the input file with orbital parameter information. delim : `str`, optional - The delimiter for the input orbit file. Default is None, will use delim_whitespace=True. + The delimiter for the input orbit file. + Default is None, will use delim_whitespace=True. skiprows : `int`, optional - The number of rows to skip before reading the header information for pandas. - Default is None, which will trigger a check of the file to look for the header columns. + The number of rows to skip before reading the header information. + Default is None, which will trigger a search of the file for + the header columns. """ names = None - # If skiprows is set, then we will assume the user has handled this so that the - # first line read has the header information. - # But, if skiprows is not set, then we have to do some checking to see if there is - # header information and which row it might start in. + # If skiprows is set, then we will assume the user has + # handled this so that the first line read has the header information. + # But, if skiprows is not set, then we have to do some checking to + # see if there is header information and which row it might start in. if skiprows is None: skiprows = -1 - # Figure out whether the header is in the first line, or if there are rows to skip. - # We need to do a bit of juggling to do this before pandas reads the whole orbit file though. + # Figure out whether the header is in the first line, + # or if there are rows to skip. + # We need to do a bit of juggling to do this before pandas + # reads the whole orbit file. with open(orbit_file, "r") as fp: headervalues = None for line in fp: values = line.split() try: - # If it is a valid orbit line, we expect column 3 to be a number. + # If it is a valid orbit line, + # we expect column 3 to be a number. float(values[3]) - # And if it worked, we're done here (it's an orbit) - go on to parsing header values. + # And if it worked, we're done here (it's an orbit) - + # go on to parsing header values. break except (ValueError, IndexError): - # This wasn't a valid number or there wasn't anything in the third value. - # So this is either the header line or it's a comment line before the header columns. + # This wasn't a valid number or there wasn't + # anything in the third value. + # So this is either the header line or it's a + # comment line before the header columns. skiprows += 1 headervalues = values if headervalues is not None: # (and skiprows > -1) - # There is a header, but we also need to check if there is a comment key at the start - # of the proper header line. - # ... Because this varies as well, and is sometimes separated from header columns. + # There is a header, but we also need to check if there + # is a comment key at the start of the proper header line. + # (Because this varies as well). linestart = headervalues[0] if linestart == "#" or linestart == "!!" or linestart == "##": names = headervalues[1:] @@ -308,7 +335,8 @@ def read_orbits(self, orbit_file, delim=None, skiprows=None): # Add 1 to skiprows, so that we skip the header column line. skiprows += 1 - # So now skiprows is a value. 
If it is -1, then there is no header information. + # So now skiprows is a value. + # If it is -1, then there is no header information. if skiprows == -1: # No header; assume it's a typical DES file - # we'll assign the column names based on the FORMAT. @@ -374,7 +402,8 @@ def read_orbits(self, orbit_file, delim=None, skiprows=None): else: orbits = pd.read_csv(orbit_file, sep=delim, skiprows=skiprows, names=names) - # Drop some columns that are typically present in DES files but that we don't need. + # Drop some columns that are typically present in DES files + # but that we don't need. if "INDEX" in orbits: del orbits["INDEX"] if "N_PAR" in orbits: @@ -386,12 +415,13 @@ def read_orbits(self, orbit_file, delim=None, skiprows=None): if "tmp" in orbits: del orbits["tmp"] - # Normalize the column names to standard values and identify the orbital element types. + # Normalize the column names to standard values and + # identify the orbital element types. sso_cols = orbits.columns.values.tolist() # These are the alternative possibilities for various column headers # (depending on file version, origin, etc.) - # that might need remapping from the on-file values to our standardized values. + # that might need remapping to our standardized names. alt_names = {} alt_names["obj_id"] = [ "obj_id", @@ -449,7 +479,7 @@ def read_orbits(self, orbit_file, delim=None, skiprows=None): # Assign the new column names back to the orbits dataframe. orbits.columns = sso_cols - # Failing on negaitive inclinations. + # Failing on negative inclinations. if "inc" in orbits.keys(): if np.min(orbits["inc"]) < 0: negative_incs = np.where(orbits["inc"].values < 0)[0] @@ -460,10 +490,13 @@ def read_orbits(self, orbit_file, delim=None, skiprows=None): self.set_orbits(orbits) def update_orbits(self, neworb): - """Update existing orbits with new values, leaving OrbitIds, H, g, and sed_filenames in place. + """Update existing orbits with new values, + leaving OrbitIds, H, g, and sed_filenames in place. - Example use: transform orbital parameters (using PyOrbEphemerides) and then replace original values. - Example use: propagate orbital parameters (using PyOrbEphemerides) and then replace original values. + Example use: transform orbital parameters (using PyOrbEphemerides) + and then replace original values. + Example use: propagate orbital parameters (using PyOrbEphemerides) + and then replace original values. Parameters ---------- diff --git a/rubin_sim/moving_objects/pre_generate.py b/rubin_sim/moving_objects/pre_generate.py index d6e8f9fd6..fdacc95d7 100644 --- a/rubin_sim/moving_objects/pre_generate.py +++ b/rubin_sim/moving_objects/pre_generate.py @@ -7,6 +7,8 @@ from rubin_sim.moving_objects import DirectObs, Orbits if __name__ == "__main__": + """Pre-generate a series of nightly ephemerides with a 1-night timestep. + """ mjd_start = 60676.0 length = 365.25 * 12 # How long to pre-compute for dtime = 1 diff --git a/rubin_sim/moving_objects/utils.py b/rubin_sim/moving_objects/utils.py index e9fe6db9e..966cccc11 100644 --- a/rubin_sim/moving_objects/utils.py +++ b/rubin_sim/moving_objects/utils.py @@ -1,12 +1,9 @@ __all__ = ("read_observations",) import logging -import os from rubin_sim.maf.utils import get_sim_data -from .orbits import Orbits - def read_observations(simfile, colmap, constraint=None, dbcols=None): """Read the opsim database. 
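For orientation, a hedged usage sketch of read_observations follows. The database filename is hypothetical, and the ColMapDict call signature is an assumption for illustration (the docstring above only names it as the colmap source):

    from rubin_sim.maf.batches import ColMapDict        # colmap source named in the docstring
    from rubin_sim.moving_objects import read_observations  # assumed package-level export

    colmap = ColMapDict()                               # assumed default call
    simdata = read_observations("baseline.db",          # hypothetical opsim database
                                colmap,
                                constraint="night < 366")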
@@ -18,16 +15,17 @@ def read_observations(simfile, colmap, constraint=None, dbcols=None):
    colmap : `dict`
        colmap dictionary (from rubin_sim.maf.batches.ColMapDict)
    constraint : `str`, optional
-        Optional SQL constraint (minus 'where') on the opsim data to read from db.
+        Optional SQL constraint (minus 'where') on the data to read from db.
        Default is None.
-    dbcols : `list` of `str`, optional
-        List of additional columns to query from the db and add to the output observations.
+    dbcols : `list` [`str`], optional
+        List of additional columns to query from the db and add to the
+        output observations.
        Default None.

    Returns
    -------
-    np.ndarray, dictionary
-        The OpSim data read from the database, and the dictionary mapping the column names to the data.
+    simdata : `np.ndarray`, (N)
+        The OpSim data read from the database.
    """
    if "rotSkyPos" not in colmap:
        colmap["rotSkyPos"] = "rotSkyPos"
diff --git a/rubin_sim/phot_utils/bandpass.py b/rubin_sim/phot_utils/bandpass.py
index 8a60a12f3..2ad4225af 100644
--- a/rubin_sim/phot_utils/bandpass.py
+++ b/rubin_sim/phot_utils/bandpass.py
@@ -75,7 +75,7 @@ def __init__(self, wavelen=None, sb=None, sampling_warning=0.2):
        return

    def _check_wavelength_sampling(self):
-        """Check that the wavelength sampling is above some threshold"""
+        """Check that the wavelength sampling is above some threshold."""
        if self.wavelen is not None:
            dif = np.diff(self.wavelen)
            if np.max(dif) > self.sampling_warning:
diff --git a/rubin_sim/phot_utils/photometric_parameters.py b/rubin_sim/phot_utils/photometric_parameters.py
index 479779013..0f6c5eb30 100644
--- a/rubin_sim/phot_utils/photometric_parameters.py
+++ b/rubin_sim/phot_utils/photometric_parameters.py
@@ -14,18 +14,21 @@ class DustValues:

    Parameters
    ----------
-    R_v : float (3.1)
+    R_v : `float`
        Extinction law parameter (3.1).
-    bandpassDict : dict (None)
-        A dict with keys of filtername and values of rubin_sim.phot_utils.Bandpass objects. Default
-        of None will load the standard ugrizy bandpasses.
-    ref_ev : float (1.)
+    bandpass_dict : `dict`
+        A dict with keys of filtername and values of
+        rubin_sim.phot_utils.Bandpass objects.
+        Default of None will load the standard ugrizy bandpasses.
+    ref_ebv : `float`
        The reference E(B-V) value to use. Things in MAF assume 1.

-    Note: the value that dust_values calls "ax1" is actually equivalent to r_x in any filter.
-    And then it's more clear that r_x * ebv = A_x (the extinction due to dust in any bandpass).
-    DustValues.r_x is also provided as a copy of DustValues.ax1 .. eventually ax1 may be deprecated
-    in favor of r_x.
+    Note
+    ----
+    The value that dust_values calls "ax1" is equivalent to r_x in any filter.
+    And r_x * ebv = A_x (the extinction due to dust in any bandpass).
+    DustValues.r_x is also provided as a copy of DustValues.ax1 ..
+    eventually ax1 may be deprecated in favor of r_x.
    """

    def __init__(self, r_v=3.1, bandpass_dict=None, ref_ebv=1.0):
@@ -49,7 +52,8 @@ def __init__(self, r_v=3.1, bandpass_dict=None, ref_ebv=1.0):
            # Add dust
            a, b = testsed.setup_ccm_ab()
            testsed.add_dust(a, b, ebv=self.ref_ebv, r_v=r_v)
-            # Calculate difference due to dust when EBV=1.0 (m_dust = m_nodust - Ax, Ax > 0)
+            # Calculate difference due to dust when EBV=1.0
+            # (m_dust = m_nodust - Ax, Ax > 0)
            self.ax1[filtername] = testsed.calc_mag(bandpass_dict[filtername]) - flatmag

        # Add the R_x term, to start to transition toward this name.
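A minimal sketch of the r_x relation noted in the DustValues docstring above; it assumes rubin_sim and its standard bandpass data are installed and that DustValues is importable from rubin_sim.phot_utils:

    from rubin_sim.phot_utils import DustValues

    dust = DustValues()           # r_v=3.1, standard ugrizy bandpasses
    ebv = 0.05                    # some line-of-sight E(B-V), illustrative
    a_g = dust.r_x["g"] * ebv     # A_g: magnitudes of dust extinction in g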
self.r_x = self.ax1.copy()
@@ -70,7 +74,7 @@ class DefaultPhotometricParameters:
    Users should not access this class (which is why it is not included
    in the __all__ declaration for this file).

-    It is only used to initialize PhotometricParameters off of
+    It is only used to initialize PhotometricParameters for
    a bandpass name.
    """

@@ -148,76 +152,55 @@ def __init__(
        sigma_sys=None,
        bandpass=None,
    ):
-        """
-        Parameters
-        ----------
-        exptime : `Unknown`
-            exposure time in seconds (defaults to LSST value)
-
-        Parameters
-        ----------
-        nexp : `Unknown`
-            number of exposures (defaults to LSST value)
-
-        Parameters
-        ----------
-        effarea : `Unknown`
-            effective area in cm^2 (defaults to LSST value)
-
-        Parameters
-        ----------
-        gain : `Unknown`
-            electrons per ADU (defaults to LSST value)
+        """Store photometric parameters for SNR calculations.

        Parameters
        ----------
-        readnoise : `Unknown`
-            electrons per pixel per exposure (defaults to LSST value)
-
-        Parameters
-        ----------
-        darkcurrent : `Unknown`
-            electons per pixel per second (defaults to LSST value)
-
-        Parameters
-        ----------
-        othernoise : `Unknown`
-            electrons per pixel per exposure (defaults to LSST value)
-
-        Parameters
-        ----------
-        platescale : `Unknown`
-            arcseconds per pixel (defaults to LSST value)
-
-        Parameters
-        ----------
-        sigma_sys : `Unknown`
-            systematic error in magnitudes
-            (defaults to LSST value)
-
-        Parameters
-        ----------
-        bandpass : `Unknown`
-            is the name of the bandpass to which these parameters
-            correspond. If set to an LSST bandpass, the constructor will initialize
-            PhotometricParameters to LSST default values for that bandpass, excepting
-            any parameters that have been set by hand, i.e
-
-            myPhotParams = PhotometricParameters(nexp=3, bandpass='u')
-
-            will initialize a PhotometricParameters object to u bandpass defaults, except
-            with 3 exposures instead of 2.
-
-            If bandpass is left as None, other parameters will default to LSST r band
-            values (except for those values set by hand). The bandpass member variable
-            of PhotometricParameters will, however, remain None.
+        exptime : `float`
+            Exposure time in seconds (per exposure).
+            None will default to value from DefaultPhotometricParameters.
+        nexp : `int`
+            Number of exposures per visit.
+            None will default to value from DefaultPhotometricParameters.
+        effarea : `float`
+            Effective area in cm^2.
+            None will default to value from DefaultPhotometricParameters.
+        gain : `float`
+            Electrons per ADU.
+            None will default to value from DefaultPhotometricParameters.
+        readnoise : `float`
+            Electrons per pixel per exposure.
+            None will default to value from DefaultPhotometricParameters.
+        darkcurrent : `float`
+            Electrons per pixel per second.
+            None will default to value from DefaultPhotometricParameters.
+        othernoise : `float`
+            Electrons per pixel per exposure.
+            None will default to value from DefaultPhotometricParameters.
+        platescale : `float`
+            Arcseconds per pixel.
+            None will default to value from DefaultPhotometricParameters.
+        sigma_sys : `float`
+            Systematic error in magnitudes.
+            None will default to value from DefaultPhotometricParameters.
+        bandpass : `str`
+            The name of the bandpass for these parameters.
+            If set to an LSST bandpass, the constructor will initialize
+            PhotometricParameters to LSST default values for that bandpass,
+            excepting any parameters that have been set by hand. e.g.
+
+            >>> myPhotParams = PhotometricParameters(nexp=3, bandpass='u')
+
+            will initialize a PhotometricParameters object to `u` band defaults,
+            except with 3 exposures instead of 2. A bandpass value of None
+            will use defaults from LSST `r` band where appropriate.
        """
-        # readnoise, darkcurrent and othernoise are measured in electrons.
        # This is taken from the specifications document LSE-30 on Docushare
-        # Section 3.4.2.3 states that the total noise per pixel shall be 12.7 electrons per visit
-        # which the defaults sum to (remember to multply darkcurrent by the number
-        # of seconds in an exposure=15). [9 e- per 15 second exposure]
+        # Section 3.4.2.3 states that the total noise per pixel shall
+        # be 12.7 electrons per visit which the defaults sum to
+        # (remember to multiply darkcurrent by the number of seconds
+        # in an exposure=15). [9 e- per 15 second exposure]

        self._exptime = None
        self._nexp = None
@@ -323,7 +306,8 @@ def __init__(
    @property
    def bandpass(self):
        """
-        The name of the bandpass associated with these parameters (can be None)
+        The name of the bandpass associated with these parameters.
+        Can be None.
        """
        return self._bandpass

diff --git a/rubin_sim/phot_utils/physical_parameters.py b/rubin_sim/phot_utils/physical_parameters.py
index 8e661bf0f..78383897a 100644
--- a/rubin_sim/phot_utils/physical_parameters.py
+++ b/rubin_sim/phot_utils/physical_parameters.py
@@ -3,14 +3,14 @@

class PhysicalParameters:
    """
-    A dataclass to store physical constants and other immutable parameters
+    Stores physical constants and other immutable parameters
    used by the sims_phot_utils code.
    """

    def __init__(self):
-        self._lightspeed = 299792458.0  # speed of light, = 2.9979e8 m/s
-        self._planck = 6.626068e-27  # planck's constant, = 6.626068e-27 ergs*seconds
-        self._nm2m = 1.00e-9  # nanometers to meters conversion = 1e-9 m/nm
+        self._lightspeed = 299792458.0  # speed of light, m/s
+        self._planck = 6.626068e-27  # Planck's constant, ergs*seconds
+        self._nm2m = 1.00e-9  # nanometers to meters conversion, m/nm
        self._ergsetc2jansky = 1.00e23  # erg/cm2/s/Hz to Jansky units (fnu)

    @property
diff --git a/rubin_sim/phot_utils/sed.py b/rubin_sim/phot_utils/sed.py
index a1d9085a4..65fb401ff 100644
--- a/rubin_sim/phot_utils/sed.py
+++ b/rubin_sim/phot_utils/sed.py
@@ -21,69 +21,51 @@
# """
-sed -
+Sed -

Class data:
wavelen (nm)
flambda (ergs/cm^2/s/nm)
fnu (Jansky)
-zp (basically translates to units of fnu = -8.9 (if Janskys) or 48.6 (ergs/cm^2/s/hz))
+zp (translates to units of fnu = -8.9 (if Janskys) or 48.6 (ergs/cm^2/s/Hz))
the name of the sed file

-It is important to note the units are NANOMETERS, not ANGSTROMS. It is possible to rig this so you can
-use angstroms instead of nm, but you should know what you're doing and understand the wavelength grid
-limits applied here and in Bandpass.py.
+It is important to note the units are NANOMETERS, not ANGSTROMS.

Methods:
-Because of how these methods will be applied for catalog generation, (taking one base SED and then
-applying various dust extinctions and redshifts), many of the methods will either work on,
-and update self, OR they can be given a set of lambda/flambda arrays and then will return
-new versions of these arrays. In general, the methods will not explicitly set flambda or fnu to
-something you (the user) did not specify - so, for example, when calculating magnitudes (which depend on
-a wavelength/fnu gridded to match the given bandpass) the wavelength and fnu used are temporary copies
-and the object itself is not changed.
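A sketch of the "update self or return new arrays" convention that the docstring above describes, assuming rubin_sim is installed and a Bandpass object has been built elsewhere from throughput data:

    from rubin_sim.phot_utils import Sed

    sed = Sed()
    sed.set_flat_sed()                 # updates self.wavelen/flambda/fnu in place
    # calc_mag grids temporary copies onto the bandpass; sed is unchanged:
    # mag = sed.calc_mag(bandpass)     # bandpass assumed constructed elsewhere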
-
-In general, the philosophy of Sed.py is to not define the wavelength grid for the object until necessary
-(so, not until needed for the magnitude calculation or resample_sed is called). At that time the min/max/step
-wavelengths or the bandpass wavelengths are used to define a new wavelength grid for the sed object.
-
-When considering whether to use the internal wavelen/flambda (self) values, versus input values:
-For consistency, anytime self.wavelen/flambda is used, it will be updated if the values are changed
-(except in the special case of calculating magnitudes), and if self.wavelen/flambda is updated,
-self.fnu will be set to None. This is because many operations are typically chained together
-which alter flambda -- so it is more efficient to wait and recalculate fnu at the end, plus it
-avoids possible de-synchronization errors (flambda reflecting the addition of dust while fnu does
-not, for example). If arrays are passed into a method, they will not be altered and the arrays
-which are returned will be allocated new memory.
-
-Another general philosophy for Sed.py is use separate methods for items which only need to be generated once
-for several objects (such as the dust A_x, b_x arrays). This allows the user to optimize their code for
-faster operation, depending on what their requirements are (see example_SedBandpass_star.py and
+Because of how these methods will be applied for catalog generation
+(taking one base SED and then applying various dust extinctions and redshifts),
+many of the methods will either work on, and update self, OR they can be given
+a set of lambda/flambda arrays and then will return new versions of
+these arrays. In general, the methods will not explicitly set flambda or fnu to
+something you (the user) did not specify. So, for example, when calculating
+magnitudes (which depend on a wavelength/fnu gridded to match the given
+bandpass) the wavelength and fnu used are temporary copies and the object
+itself is not changed.
+
+In general, the philosophy of Sed.py is to not define the wavelength
+grid for the object until necessary (so, not until needed for the
+magnitude calculation or resample_sed is called). At that time the min/max/step
+wavelengths or the bandpass wavelengths are used to define a new wavelength
+grid for the sed object.
+
+When considering whether to use the internal wavelen/flambda (self) values,
+versus input values:
+For consistency, anytime self.wavelen/flambda is used, it will be updated
+if the values are changed (except in the special case of calculating
+magnitudes), and if self.wavelen/flambda is updated, self.fnu will be set to
+None. This is because many operations are typically chained together
+which alter flambda -- so it is more efficient to wait and recalculate fnu at
+the end, plus it avoids possible de-synchronization errors
+(flambda reflecting the addition of dust while fnu does
+not, for example). If arrays are passed into a method, they will not be
+altered and the arrays which are returned will be allocated new memory.
+
+Another general philosophy for Sed.py is to use separate methods for items which
+only need to be generated once for several objects (such as the dust A_x, b_x
+arrays). This allows the user to optimize their code for faster operation,
+depending on what their requirements are (see example_SedBandpass_star.py and
exampleSedBandpass_galaxy for examples).
-
-Method include:
-* setSED / set_flat_sed / readSED_flambda / readSED_fnu -- to input information into Sed wavelen/flambda.
-* getSED_flambda / getSED_fnu -- to return wavelen / flambda or fnu to the user. -* clearSED -- set everything to 0. -* synchronizeSED -- to calculate wavelen/flambda/fnu on the desired grid and calculate fnu. -* _checkUseSelf/needResample -- not expected to be useful to the user, rather intended for internal use. -* resample_sed -- primarily internal use, but may be useful to user. Resamples SED onto specified grid. -* flambda_tofnu / fnuToflambda -- conversion methods, does not affect wavelen gridding. -* redshift_sed -- redshifts the SED, optionally adding dimmingx -* (setupODonnell_ab or setup_ccm_ab) / add_dust -- separated into two components, -so that a_x/b_x can be reused between SEDS -if the wavelength range and grid is the same for each SED (calculate a_x/b_x with either setupODonnell_ab -or setup_ccm_ab). -* multiplySED -- multiply two SEDS together. -* calc_adu / calc_mag / calcFlux -- with a Bandpass, calculate the ADU/magnitude/flux of a SED. -* calcFluxNorm / multiply_flux_norm -- handle fluxnorm parameters (from UW LSST database) properly. - These methods are intended to give a user an easy way to scale an SED to match an expected magnitude. -* renormalizeSED -- intended for rescaling SEDS to a common flambda or fnu level. -* writeSED -- keep a file record of your SED. -* setPhiArray -- given a list of bandpasses, sets up the 2-d phiArray (for many_mag_calc) and dlambda value. -* many_mag_calc -- given 2-d phiArray and dlambda, this will return an array of magnitudes (in the same -order as the bandpasses) of this SED in each of those bandpasses. - """ __all__ = ("Sed", "cache_lsst_seds", "read_close__kurucz") @@ -221,12 +203,15 @@ def _generate_sed_cache(cache_dir, cache_name): Parameters ---------- - cache_dir is the directory where the cache will be created - cache_name is the name of the cache to be created + cache_dir : `str` + The directory where the cache will be created. + cache_name : `str` + The name of the cache to be created. Returns ------- - The dict of SEDs (keyed to their full file name) + cache : `dict` [`str`, `Sed`] + The dict of SEDs (keyed to their full file name). """ sed_root = os.path.join(get_data_dir(), "sims_sed_library") dtype = numpy.dtype([("wavelen", float), ("flambda", float)]) @@ -290,49 +275,37 @@ def cache_lsst_seds(wavelen_min=None, wavelen_max=None, cache_dir=None): Read all of the SEDs in sims_sed_library into a dict. Pickle the dict and store it in phot_utils/cacheDir/lsst_sed_cache.p for future use. - After the file has initially been created, the next time you run this script, - it will just use pickle to load the dict. + After the file has initially been created, + the next time you run this script, it will just use the pickle. Once the dict is loaded, Sed.read_sed_flambda() will be able to read any LSST-shipped SED directly from memory, rather than using I/O to read it from an ASCII file stored on disk. Note: the dict of cached SEDs will take up about 5GB on disk. Once loaded, - the cache will take up about 1.5GB of memory. The cache takes about 14 minutes - to generate and about 51 seconds to load on a 2014 Mac Book Pro. + the cache will take up about 1.5GB of memory. Parameters ----------- wavelen_min : `float` + Wavelength minimum value to store for each Sed. wavelen_max : `float` + Wavelength maximum value to store for each Sed. + cache_dir : `str` + The directory to place the cache pickle. 
-    if either of these are not None, then every SED in the cache will be
+    If either of wavelen_min or wavelen_max is not None,
+    then every SED in the cache will be
    truncated to only include the wavelength range (in nm) between
    wavelen_min and wavelen_max
-
-    cache_dir is a string indicating the directory in which to search for/write
-    the cache. If set to None, the cache will be in
-    $SIMS_SED_LIBRARY_DIR/lsst_sed_cache_dir/, which may be write-protected on
-    shared installations of the LSST stack. Defaults to None.
    """
    global _global_lsst_sed_cache

-    try:
-        sed_cache_name = os.path.join("lsst_sed_cache_%d.p" % sys.version_info.major)
-        sed_dir = os.path.join(get_data_dir(), "sims_sed_library")
-        if cache_dir is None:
-            cache_dir = os.path.join(get_data_dir(), "sims_sed_library", "lsst_sed_cache_dir")
-
-    except Exception:
-        print(
-            "An exception was raised related to sims_sed_library. If you did not "
-            "install sims_phot_utils with a full LSST simulations stack, you cannot "
-            "load and generate the cache of LSST SEDs. If you did install the full sims "
-            "stack but are getting this message, please check that sims_sed_library is "
-            "actually setup and active in your environment."
-        )
-        return
+    sed_cache_name = os.path.join("lsst_sed_cache_%d.p" % sys.version_info.major)
+    sed_dir = os.path.join(get_data_dir(), "sims_sed_library")
+    if cache_dir is None:
+        cache_dir = os.path.join(get_data_dir(), "sims_sed_library", "lsst_sed_cache_dir")

    if not os.path.exists(cache_dir):
        os.mkdir(cache_dir)
@@ -475,10 +448,11 @@ def __ne__(self, other):

    def set_sed(self, wavelen, flambda=None, fnu=None, name="FromArray"):
        """
-        Populate wavelen/flambda fields in sed by giving lambda/flambda or lambda/fnu array.
+        Populate wavelen/flambda fields in sed by giving lambda/flambda
+        or lambda/fnu array.

-        If flambda present, this overrides fnu. Method sets fnu=None unless only fnu is given.
-        Sets wavelen/flambda or wavelen/flambda/fnu over wavelength array given.
+        If flambda is present, this overrides fnu.
+        Method sets fnu=None unless only fnu is given.
        """
        # Check wavelen array for type matches.
        if isinstance(wavelen, numpy.ndarray) is False:
@@ -507,7 +481,8 @@ def set_sed(self, wavelen, flambda=None, fnu=None, name="FromArray"):

    def set_flat_sed(self, wavelen_min=300.0, wavelen_max=1150.0, wavelen_step=0.1, name="Flat"):
        """
-        Populate the wavelength/flambda/fnu fields in sed according to a flat fnu source.
+        Populate the wavelength/flambda/fnu fields in sed according to a
+        flat fnu source.
        """

        self.wavelen = numpy.arange(wavelen_min, wavelen_max + wavelen_step, wavelen_step, dtype="float")
@@ -518,7 +493,8 @@ def set_flat_sed(self, wavelen_min=300.0, wavelen_max=1150.0, wavelen_step=0.1,

    def read_sed_flambda(self, filename, name=None, cache_sed=True):
        """
-        Read a file containing [lambda Flambda] (lambda in nm) (Flambda erg/cm^2/s/nm).
+        Read a file containing [lambda Flambda]
+        (lambda in nm) (Flambda erg/cm^2/s/nm).

        Does not resample wavelen/flambda onto grid; leave fnu=None.
        """
@@ -526,7 +502,8 @@ def read_sed_flambda(self, filename, name=None, cache_sed=True):
        global _global_misc_sed_cache

        # Try to open data file.
-        # ASSUME that if filename ends with '.gz' that the file is gzipped. Otherwise, regular file.
+        # ASSUME that if filename ends with '.gz' that the file is gzipped.
+        # Otherwise, regular file.
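As a hedged sketch, typical use of cache_lsst_seds per the docstring above; it requires the sims_sed_library data download, the truncation bounds are illustrative, and the SED filename below is hypothetical:

    from rubin_sim.phot_utils import Sed, cache_lsst_seds

    cache_lsst_seds(wavelen_min=300.0, wavelen_max=1150.0)  # one-time build, then fast loads
    sed = Sed()
    sed.read_sed_flambda("some_kurucz_model.gz")            # served from the in-memory cache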
if filename.endswith(".gz"): gzipped_filename = filename unzipped_filename = filename[:-3] @@ -552,7 +529,8 @@ def read_sed_flambda(self, filename, name=None, cache_sed=True): sourceflambda = numpy.copy(cached_source[1]) if cached_source is None: - # Read source SED from file - lambda, flambda should be first two columns in the file. + # Read source SED from file - lambda, flambda should be first + # two columns in the file. # lambda should be in nm and flambda should be in ergs/cm2/s/nm dtype = numpy.dtype([("wavelen", float), ("flambda", float)]) try: @@ -561,9 +539,6 @@ def read_sed_flambda(self, filename, name=None, cache_sed=True): try: data = numpy.genfromtxt(unzipped_filename, dtype=dtype) except Exception as err: - # see - # http://stackoverflow.com/questions/ - # 9157210/how-do-i-raise-the-same-exception-with-a-custom-message-in-python new_args = [ err.args[0] + "\n\nError reading sed file %s; " % filename + "it may not exist." ] @@ -594,7 +569,8 @@ def read_sed_flambda(self, filename, name=None, cache_sed=True): def read_sed_fnu(self, filename, name=None): """ - Read a file containing [lambda Fnu] (lambda in nm) (Fnu in Jansky). + Read a file containing [lambda Fnu] + (lambda in nm) (Fnu in Jansky). Does not resample wavelen/fnu/flambda onto a grid; leaves fnu set. """ @@ -613,7 +589,8 @@ def read_sed_fnu(self, filename, name=None): f = gzip.open(filename + ".gz", "rt") except IOError: raise IOError("The throughput file %s does not exist" % (filename)) - # Read source SED from file - lambda, fnu should be first two columns in the file. + # Read source SED from file - lambda, fnu should be + # the first two columns in the file. # lambda should be in nm and fnu should be in Jansky. sourcewavelen = [] sourcefnu = [] @@ -656,7 +633,8 @@ def get_sed_fnu(self): else: # Fnu was not set .. grab copy fnu without changing self. wavelen, fnu = self.flambda_tofnu(self.wavelen, self.flambda) - # Now wavelen/fnu (new mem) are gridded evenly, but self.wavelen/flambda/fnu remain unchanged. + # Now wavelen/fnu (new mem) are gridded evenly, + # but self.wavelen/flambda/fnu remain unchanged. return wavelen, fnu # Methods that update or change self. @@ -676,7 +654,8 @@ def synchronize_sed(self, wavelen_min=None, wavelen_max=None, wavelen_step=None) """ Set all wavelen/flambda/fnu values, potentially on min/max/step grid. - Uses flambda to recalculate fnu. If wavelen min/max/step are given, resamples + Uses flambda to recalculate fnu. + If wavelen min/max/step are given, resamples wavelength/flambda/fnu onto an even grid with these values. """ # Grid wavelength/flambda/fnu if desired. @@ -694,13 +673,15 @@ def synchronize_sed(self, wavelen_min=None, wavelen_max=None, wavelen_step=None) def _check_use_self(self, wavelen, flux): """ - Simple utility to check if should be using self's data or passed arrays. + Simple utility to check if should be using self's data or + passed arrays. Also does data integrity check on wavelen/flux if not self. """ update_self = False if (wavelen is None) or (flux is None): - # Then one of the arrays was not passed - check if this is true for both arrays. + # Then one of the arrays was not passed - + # check if this is true for both arrays. if (wavelen is not None) or (flux is not None): # Then one of the arrays was passed - raise exception. raise ValueError("Must either pass *both* wavelen/flux pair, or use defaults.") @@ -722,7 +703,8 @@ def _need_resample( wavelen_step=None, ): """ - Check if wavelen or self.wavelen matches wavelen or wavelen_min/max/step grid. 
+        Check if wavelen or self.wavelen matches wavelen
+        or wavelen_min/max/step grid.
        """
        # Check if should use self or passed wavelen.
        if wavelen is None:
@@ -736,8 +718,10 @@ def _need_resample(
            need_regrid = numpy.any(abs(wavelen_match - wavelen) > 1e-10)
        else:
            need_regrid = True
-        # Check if wavelen_min/max/step are set - if ==None, then return (no regridding).
-        # It's possible (writeSED) to call this routine, even with no final grid in mind.
+        # Check if wavelen_min/max/step are set -
+        # if ==None, then return (no regridding).
+        # It's possible (writeSED) to call this routine,
+        # even with no final grid in mind.
        if (wavelen_min is None) and (wavelen_max is None) and (wavelen_step is None):
            need_regrid = False
        else:
@@ -750,7 +734,8 @@ def _need_resample(
            stepsize = numpy.unique(numpy.diff(wavelen))
            if (len(stepsize) == 1) and (stepsize[0] == wavelen_step):
                need_regrid = False
-        # At this point, need_grid=True unless it's proven to be False, so return value.
+        # At this point, need_regrid=True unless it's proven to be False,
+        # so return value.
        return need_regrid

    def resample_sed(
@@ -764,12 +749,15 @@ def resample_sed(
        force=False,
    ):
        """
-        Resample flux onto grid defined by min/max/step OR another wavelength array.
+        Resample flux onto grid defined by min/max/step OR
+        another wavelength array.

        Give method wavelen/flux OR default to self.wavelen/self.flambda.
-        Method either returns wavelen/flambda (if given those arrays) or updates wavelen/flambda in self.
+        Method either returns wavelen/flambda (if given those arrays) or
+        updates wavelen/flambda in self.
        If updating self, resets fnu to None.
-        Method will first check if resampling needs to be done or not, unless 'force' is True.
+        Method will first check if resampling needs to be done or not,
+        unless 'force' is True.
        """
        # Check if need resampling:
        if force or (
@@ -781,7 +769,8 @@ def resample_sed(
                wavelen_step=wavelen_step,
            )
        ):
-            # Is method acting on self.wavelen/flambda or passed in wavelen/flux arrays?
+            # Is method acting on self.wavelen/flambda or
+            # passed in wavelen/flux arrays?
            update_self = self._check_use_self(wavelen, flux)
            if update_self:
                wavelen = self.wavelen
@@ -797,7 +786,8 @@ def resample_sed(
                )
            else:
                wavelen_grid = numpy.copy(wavelen_match)
-            # Check if the wavelength range desired and the wavelength range of the object overlap.
+            # Check if the wavelength range desired and the wavelength
+            # range of the object overlap.
            # If there is any non-overlap, raise warning.
            if (wavelen.max() < wavelen_grid.max()) or (wavelen.min() > wavelen_grid.min()):
                warnings.warn(
@@ -805,7 +795,8 @@ def resample_sed(
                    + " (%.2f to %.2f)" % (wavelen_grid.min(), wavelen_grid.max())
                    + "and sed %s (%.2f to %.2f)" % (self.name, wavelen.min(), wavelen.max())
                )
-            # Do the interpolation of wavelen/flux onto grid. (type/len failures will die here).
+            # Do the interpolation of wavelen/flux onto grid.
+            # (type/len failures will die here).
            if wavelen[0] > wavelen_grid[0] or wavelen[-1] < wavelen_grid[-1]:
                f = interpolate.interp1d(wavelen, flux, bounds_error=False, fill_value=numpy.NaN)
                flux_grid = f(wavelen_grid)
@@ -828,12 +819,15 @@ def flambda_tofnu(self, wavelen=None, flambda=None):
        """
        Convert flambda into fnu.

-        This routine assumes that flambda is in ergs/cm^s/s/nm and produces fnu in Jansky.
-        Can act on self or user can provide wavelen/flambda and get back wavelen/fnu.
+        This routine assumes that flambda is in ergs/cm^2/s/nm and
+        produces fnu in Jansky.
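The unit bookkeeping for this conversion, written out as standalone arithmetic (constants as in PhysicalParameters above; the sample values are illustrative):

    import numpy as np

    lightspeed = 299792458.0       # m/s
    nm2m = 1.00e-9                 # nm -> m
    ergsetc2jansky = 1.00e23       # erg/cm^2/s/Hz -> Jansky

    wavelen = np.array([500.0])    # nm
    flambda = np.array([1.0e-16])  # erg/cm^2/s/nm
    # F_nu = F_lambda * lambda^2 / c, then convert to Jansky
    fnu = flambda * wavelen * wavelen * nm2m / lightspeed * ergsetc2jansky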
+ Can act on self or user can provide wavelen/flambda and + get back wavelen/fnu. """ # Change Flamda to Fnu by multiplying Flambda * lambda^2 = Fv # Fv dv = Fl dl .. Fv = Fl dl / dv = Fl dl / (dl*c/l/l) = Fl*l*l/c - # Check - Is the method acting on self.wavelen/flambda/fnu or passed wavelen/flambda arrays? + # Check - Is the method acting on self.wavelen/flambda/fnu + # or passed wavelen/flambda arrays? update_self = self._check_use_self(wavelen, flambda) if update_self: wavelen = self.wavelen @@ -843,7 +837,8 @@ def flambda_tofnu(self, wavelen=None, flambda=None): # Calculate fnu. fnu = flambda * wavelen * wavelen * self._phys_params.nm2m / self._phys_params.lightspeed fnu = fnu * self._phys_params.ergsetc2jansky - # If are using/updating self, then *all* wavelen/flambda/fnu will be gridded. + # If are using/updating self, then *all* wavelen/flambda/fnu + # will be gridded. # This is so wavelen/fnu AND wavelen/flambda can be kept in sync. if update_self: self.wavelen = wavelen @@ -858,7 +853,8 @@ def fnu_toflambda(self, wavelen=None, fnu=None): Convert fnu into flambda. Assumes fnu in units of Jansky and flambda in ergs/cm^s/s/nm. - Can act on self or user can give wavelen/fnu and get wavelen/flambda returned. + Can act on self or user can give wavelen/fnu and + get wavelen/flambda returned. """ # Fv dv = Fl dl .. Fv = Fl dl / dv = Fl dl / (dl*c/l/l) = Fl*l*l/c # Is method acting on self or passed arrays? @@ -886,7 +882,8 @@ def redshift_sed(self, redshift, dimming=False, wavelen=None, flambda=None): """ Redshift an SED, optionally adding cosmological dimming. - Pass wavelen/flambda or redshift/update self.wavelen/flambda (unsets fnu). + Pass wavelen/flambda or redshift/update self.wavelen/flambda + (unsets fnu). """ # Updating self or passed arrays? update_self = self._check_use_self(wavelen, flambda) @@ -922,7 +919,8 @@ def setup_cc_mab(self, wavelen=None): """ Calculate a(x) and b(x) for CCM dust model. (x=1/wavelen). - If wavelen not specified, calculates a and b on the own object's wavelength grid. + If wavelen not specified, calculates a and b on the own object's + wavelength grid. Returns a(x) and b(x) can be common to many seds, wavelen is the same. This method sets up extinction due to the model of @@ -939,17 +937,20 @@ def setup_ccm_ab(self, wavelen=None): """ Calculate a(x) and b(x) for CCM dust model. (x=1/wavelen). - If wavelen not specified, calculates a and b on the own object's wavelength grid. + If wavelen not specified, calculates a and b on the own object's + wavelength grid. Returns a(x) and b(x) can be common to many seds, wavelen is the same. This method sets up extinction due to the model of Cardelli, Clayton and Mathis 1989 (ApJ 345, 245) """ # This extinction law taken from Cardelli, Clayton and Mathis ApJ 1989. - # The general form is A_l / A(V) = a(x) + b(x)/R_V (where x=1/lambda in microns), - # then different values for a(x) and b(x) depending on wavelength regime. + # The general form is A_l / A(V) = a(x) + b(x)/R_V + # (where x=1/lambda in microns), + # then different values for a(x) and b(x) depending on wavelength. # Also, the extinction is parametrized as R_v = a_v / E(B-V). - # Magnitudes of extinction (A_l) translates to flux by a_l = -2.5log(f_red / f_nonred). + # Magnitudes of extinction (A_l) translates to flux by + # a_l = -2.5log(f_red / f_nonred). 
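Read standalone, the dimming arithmetic those comments describe is the following; the a(x)/b(x) coefficients below are placeholders, not CCM values at any particular wavelength:

    import numpy as np

    a_x, b_x = 0.9, -0.3                  # placeholder a(x), b(x) values
    r_v, ebv = 3.1, 0.1
    a_v = r_v * ebv                       # from R_v = A_v / E(B-V)
    a_lambda = (a_x + b_x / r_v) * a_v    # magnitudes of extinction
    dimming = np.exp(-a_lambda * 0.4 * np.log(10.0))   # = 10**(-0.4 * A_lambda)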
if wavelen is None:
            wavelen = numpy.copy(self.wavelen)
        a_x = numpy.zeros(len(wavelen), dtype="float")
@@ -999,16 +1000,19 @@ def setup_o_donnell_ab(self, wavelen=None):
        """
        Calculate a(x) and b(x) for O'Donnell dust model. (x=1/wavelen).

-        If wavelen not specified, calculates a and b on the own object's wavelength grid.
+        If wavelen not specified, calculates a and b on the own object's
+        wavelength grid.
        Returns a(x) and b(x) can be common to many seds, wavelen is the same.

-        This method sets up the extinction parameters from the model of O'Donnel 1994
-        (ApJ 422, 158)
+        This method sets up the extinction parameters from the model of
+        O'Donnell 1994 (ApJ 422, 158)
        """
-        # The general form is A_l / A(V) = a(x) + b(x)/R_V (where x=1/lambda in microns),
-        # then different values for a(x) and b(x) depending on wavelength regime.
+        # The general form is A_l / A(V) = a(x) + b(x)/R_V
+        # (where x=1/lambda in microns),
+        # then different values for a(x) and b(x) depending on wavelength.
        # Also, the extinction is parametrized as R_v = a_v / E(B-V).
-        # Magnitudes of extinction (A_l) translates to flux by a_l = -2.5log(f_red / f_nonred).
+        # Magnitudes of extinction (A_l) translates to flux by
+        # a_l = -2.5log(f_red / f_nonred).
        if wavelen is None:
            wavelen = numpy.copy(self.wavelen)
        a_x = numpy.zeros(len(wavelen), dtype="float")
@@ -1084,10 +1088,13 @@ def add_dust(self, a_x, b_x, a_v=None, ebv=None, r_v=3.1, wavelen=None, flambda=
        self._ln10_04 = 0.4 * numpy.log(10.0)

        # The extinction law taken from Cardelli, Clayton and Mathis ApJ 1989.
-        # The general form is A_l / A(V) = a(x) + b(x)/R_V (where x=1/lambda in microns).
-        # Then, different values for a(x) and b(x) depending on wavelength regime.
+        # The general form is A_l / A(V) = a(x) + b(x)/R_V
+        # (where x=1/lambda in microns).
+        # Then, different values for a(x) and b(x) depending on wavelength
+        # regime.
        # Also, the extinction is parametrized as r_v = a_v / E(B-V).
-        # The magnitudes of extinction (A_l) translates to flux by a_l = -2.5log(f_red / f_nonred).
+        # The magnitudes of extinction (A_l) translates to flux by
+        # a_l = -2.5log(f_red / f_nonred).
        #
        # Figure out if updating self or passed arrays.
        update_self = self._check_use_self(wavelen, flambda)
@@ -1098,7 +1105,8 @@ def add_dust(self, a_x, b_x, a_v=None, ebv=None, r_v=3.1, wavelen=None, flambda=
        else:
            wavelen = numpy.copy(wavelen)
            flambda = numpy.copy(flambda)
-        # Input parameters for reddening can include any of 3 parameters; only 2 are independent.
+        # Input parameters for reddening can include any of 3 parameters;
+        # only 2 are independent.
        # Figure out what parameters were given, and see if self-consistent.
        if r_v == 3.1:
            if a_v is None:
@@ -1119,7 +1127,8 @@ def add_dust(self, a_x, b_x, a_v=None, ebv=None, r_v=3.1, wavelen=None, flambda=
        # r_v and a_v values are specified or calculated.
        a_lambda = (a_x + b_x / r_v) * a_v

-        # dmag_red(dust) = -2.5 log10 (f_red / f_nored) : (f_red / f_nored) = 10**-0.4*dmag_red
+        # dmag_red(dust) = -2.5 log10 (f_red / f_nored) :
+        # (f_red / f_nored) = 10**-0.4*dmag_red
        dust = numpy.exp(-a_lambda * self._ln10_04)
        flambda *= dust
        # Update self if required.
@@ -1130,16 +1139,19 @@ def add_dust(self, a_x, b_x, a_v=None, ebv=None, r_v=3.1, wavelen=None, flambda=

    def multiply_sed(self, other_sed, wavelen_step=None):
        """
-        Multiply two SEDs together - flambda * flambda - and return a new sed object.
+        Multiply two SEDs together - flambda * flambda -
+        and return a new sed object.
- Unless the two wavelength arrays are equal, returns a SED gridded with stepsize wavelen_step + Unless the two wavelength arrays are equal, returns a SED + gridded with stepsize wavelen_step over intersecting wavelength region. Does not alter self or other_sed. """ if wavelen_step is None: wavelen_step = self._phys_params.wavelenstep - # Check if the wavelength arrays are equal (in which case do not resample) + # Check if the wavelength arrays are equal + # (in which case do not resample) if numpy.all(self.wavelen == other_sed.wavelen): flambda = self.flambda * other_sed.flambda new_sed = Sed(self.wavelen, flambda=flambda) @@ -1167,7 +1179,8 @@ def multiply_sed(self, other_sed, wavelen_step=None): ) # Multiply the two flambda together. flambda = flambda_1 * flambda_2 - # Instantiate new sed object. wavelen_1 == wavelen_2 as both are on grid. + # Instantiate new sed object. + # wavelen_1 == wavelen_2 as both are on grid. new_sed = Sed(wavelen_1, flambda) return new_sed @@ -1177,8 +1190,10 @@ def calc_adu(self, bandpass, phot_params, wavelen=None, fnu=None): """ Calculate the number of adu from camera, using sb and fnu. - Given wavelen/fnu arrays or use self. Self or passed wavelen/fnu arrays will be unchanged. - Calculating the AB mag requires the wavelen/fnu pair to be on the same grid as bandpass; + Given wavelen/fnu arrays or use self. + Self or passed wavelen/fnu arrays will be unchanged. + Calculating the AB mag requires the wavelen/fnu pair to be + on the same grid as bandpass; (temporary values of these are used). Parameters @@ -1190,8 +1205,8 @@ def calc_adu(self, bandpass, phot_params, wavelen=None, fnu=None): fnu : `np.ndarray`, optional flux in Janskys - If wavelen and fnu are not specified, this will just use self.wavelen and - self.fnu + If wavelen and fnu are not specified, this will just use self.wavelen + and self.fnu """ use_self = self._check_use_self(wavelen, fnu) @@ -1220,16 +1235,16 @@ def calc_adu(self, bandpass, phot_params, wavelen=None, fnu=None): def flux_from_mag(self, mag): """ - Convert a magnitude back into a flux (implies knowledge of the zeropoint, which is - stored in this class) + Convert a magnitude back into a flux (implies knowledge of the + zeropoint, which is stored in this class) """ return numpy.power(10.0, -0.4 * (mag + self.zp)) def mag_from_flux(self, flux): """ - Convert a flux into a magnitude (implies knowledge of the zeropoint, which is stored - in this class) + Convert a flux into a magnitude (implies knowledge of the + zeropoint, which is stored in this class) """ return -2.5 * numpy.log10(flux) - self.zp @@ -1272,33 +1287,30 @@ def calc_ergs(self, bandpass): def calc_flux(self, bandpass, wavelen=None, fnu=None): """ - Integrate the specific flux density of the object over the normalized response - curve of a bandpass, giving a flux in Janskys (10^-23 ergs/s/cm^2/Hz) through - the normalized response curve, as detailed in Section 4.1 of the LSST design - document LSE-180 and Section 2.6 of the LSST Science Book - (http://ww.lsst.org/scientists/scibook). This flux in Janskys (which is usually - though of as a unit of specific flux density), should be considered a weighted - average of the specific flux density over the normalized response curve of the - bandpass. Because we are using the normalized response curve (phi in LSE-180), - this quantity will depend only on the shape of the response curve, not its - absolute normalization. 
-
-        Note: the way that the normalized response curve has been defined (see equation
-        5 of LSE-180) is appropriate for photon-counting detectors, not calorimeters.
-
-        Passed wavelen/fnu arrays will be unchanged, but if uses self will check if fnu is set.
-
-        Calculating the AB mag requires the wavelen/fnu pair to be on the same grid as bandpass;
+        Integrate the specific flux density of the object over the normalized
+        response curve of a bandpass, giving a flux in Janskys
+        (10^-23 ergs/s/cm^2/Hz) through the normalized response curve, as
+        detailed in Section 4.1 of the LSST design document LSE-180 and
+        Section 2.6 of the LSST Science Book
+        (http://www.lsst.org/scientists/scibook).
+        This flux in Janskys (which is usually thought of as a unit of
+        specific flux density) should be considered a weighted average of
+        the specific flux density over the normalized response curve of the
+        bandpass. Because we are using the normalized response curve
+        (phi in LSE-180), this quantity will depend only on the shape of the
+        response curve, not its absolute normalization.
+
+        Note: the way that the normalized response curve has been defined
+        (see equation 5 of LSE-180) is appropriate for photon-counting
+        detectors, not calorimeters.
+
+        Passed wavelen/fnu arrays will be unchanged, but if uses self will
+        check if fnu is set.
+
+        Calculating the AB mag requires the wavelen/fnu pair to be on the
+        same grid as bandpass;
        (temporary values of these are used).
        """
-        # Note - the behavior in this first section might be considered a little odd.
-        # However, I felt calculating a magnitude should not (unexpectedly) regrid your
-        # wavelen/flambda information if you were using self., as this is not obvious from the "outside".
-        # To preserve 'user logic', the wavelen/flambda of self are left untouched. Unfortunately
-        # this means, this method can be used inefficiently if calculating many magnitudes with
-        # the same sed and same bandpass region - in that case, use self.synchronize_sed() with
-        # the wavelen min/max/step set to the bandpass min/max/step first ..
-        # then you can calculate multiple magnitudes much more efficiently!
        use_self = self._check_use_self(wavelen, fnu)
        # Use self values if desired, otherwise use values passed to function.
        if use_self:
@@ -1318,11 +1330,12 @@ def calc_flux(self, bandpass, wavelen=None, fnu=None):

    def calc_mag(self, bandpass, wavelen=None, fnu=None):
        """
-        Calculate the AB magnitude of an object using the normalized system response (phi from Section
-        4.1 of the LSST design document LSE-180).
+        Calculate the AB magnitude of an object using the normalized system
+        response (phi from Section 4.1 of the LSST design document LSE-180).

-        Can pass wavelen/fnu arrays or use self. Self or passed wavelen/fnu arrays will be unchanged.
-        Calculating the AB mag requires the wavelen/fnu pair to be on the same grid as bandpass;
+        Can pass wavelen/fnu arrays or use self. Self or passed wavelen/fnu
+        arrays will be unchanged. Calculating the AB mag requires the
+        wavelen/fnu pair to be on the same grid as bandpass;
        (but only temporary values of these are used).
        """
        flux = self.calc_flux(bandpass, wavelen=wavelen, fnu=fnu)
@@ -1333,10 +1346,11 @@ def calc_mag(self, bandpass, wavelen=None, fnu=None):

    def calc_flux_norm(self, magmatch, bandpass, wavelen=None, fnu=None):
        """
-        Calculate the fluxNorm (SED normalization value for a given mag) for a sed.
+        Calculate the fluxNorm (SED normalization value for a given mag)
+        for a sed.
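Because calc_flux returns Janskys and calc_mag then applies the zeropoint, the roundtrip reduces to the AB relation below (zp = -8.9 for fluxes in Jansky, per the module docstring; the sample flux is illustrative):

    import numpy as np

    zp = -8.9                                   # AB zeropoint when fnu is in Jansky
    flux = 3631.0                               # Jy; the AB flux of a mag-0 source
    mag = -2.5 * np.log10(flux) - zp            # ~0.0
    flux_back = np.power(10.0, -0.4 * (mag + zp))   # recovers ~3631 Jy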
-        Equivalent to adjusting a particular f_nu to Jansky's appropriate for the desired mag.
-        Can pass wavelen/fnu or apply to self.
+        Equivalent to adjusting a particular f_nu to Janskys appropriate
+        for the desired mag. Can pass wavelen/fnu or apply to self.
        """
        use_self = self._check_use_self(wavelen, fnu)
        if use_self:
@@ -1345,7 +1359,8 @@ def calc_flux_norm(self, magmatch, bandpass, wavelen=None, fnu=None):
            self.flambda_tofnu()
            wavelen = self.wavelen
            fnu = self.fnu
-        # Fluxnorm gets applied to f_nu (fluxnorm * SED(f_nu) * PHI = mag - 8.9 (AB zeropoint).
+        # Fluxnorm gets applied to f_nu
+        # (fluxnorm * SED(f_nu) * PHI = mag - 8.9 (AB zeropoint).
        # FluxNorm * SED => correct magnitudes for this object.
        # Calculate fluxnorm.
        curmag = self.calc_mag(bandpass, wavelen, fnu)
@@ -1360,7 +1375,8 @@ def multiply_flux_norm(self, flux_norm, wavelen=None, fnu=None):
        Multiply wavelen/fnu (or self.wavelen/fnu) by fluxnorm.

        Returns wavelen/fnu arrays (or updates self).
-        Note that multiply_flux_norm does not regrid self.wavelen/flambda/fnu at all.
+        Note that multiply_flux_norm does not regrid self.wavelen/flambda/fnu
+        at all.
        """
        # Note that flux_norm is intended to be applied to f_nu,
        # so that fluxnorm*fnu*phi = mag (expected magnitude).
@@ -1399,16 +1415,20 @@ def renormalize_sed(
        wavelen_step=None,
    ):
        """
-        Renormalize sed in flambda to have normflux=normvalue @ lambdanorm or averaged over gap.
+        Renormalize sed in flambda to have normflux=normvalue at lambdanorm
+        or averaged over gap.

-        Can normalized in flambda or fnu values. wavelen_step specifies the wavelength spacing
-        when using 'gap'.
+        Can normalize in flambda or fnu values. wavelen_step specifies
+        the wavelength spacing when using 'gap'.

        Either returns wavelen/flambda values or updates self.
        """
-        # Normalizes the fnu/flambda SED at one wavelength or average value over small range (gap).
-        # This is useful for generating SED catalogs, mostly, to make them match schema.
-        # Do not use this for calculating specific magnitudes -- use calcfluxNorm and multiply_flux_norm.
+        # Normalizes the fnu/flambda SED at one wavelength or average value
+        # over small range (gap).
+        # This is useful for generating SED catalogs, mostly, to make them
+        # match schema.
+        # Do not use this for calculating specific magnitudes -- use
+        # calc_flux_norm and multiply_flux_norm.

        # Start normalizing wavelen/flambda.
        if wavelen_step is None:
@@ -1426,13 +1446,14 @@ def renormalize_sed(
            if flambda is None:
                if fnu is None:
                    raise Exception("If passing wavelength, must also pass fnu or flambda.")
-                # If not given flambda, must calculate from the given values of fnu.
+                # If not given flambda, must calculate from fnu.
                wavelen, flambda = self.fnu_toflambda(wavelen, fnu)
            # Make a copy of the input data.
            else:
                flambda = numpy.copy(flambda)
        # Calculate renormalization values.
-        # Check that flambda is defined at the wavelength want to use for renormalization.
+        # Check that flambda is defined at the wavelength we want to use for
+        # renormalization.
        if (lambdanorm > wavelen.max()) or (lambdanorm < wavelen.min()):
            raise Exception(
                "Desired wavelength for renormalization, %f, " % (lambdanorm)
@@ -1447,7 +1468,8 @@ def renormalize_sed(
            flambda_atpt = numpy.zeros(len(lambdapt), dtype="float")
            flambda_atpt = numpy.interp(lambdapt, wavelen, flambda, left=None, right=None)
            gapval = flambda_atpt.sum() / len(lambdapt)
-        # Now renormalize fnu and flambda in the case of normalizing flambda.
+        # Now renormalize fnu and flambda, in the case of normalizing
+        # flambda.
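For contrast with renormalize_sed, the calc_flux_norm arithmetic described above amounts to the following; this is a hedged reading of the docstring, not a verbatim excerpt of the method:

    import numpy as np

    curmag, magmatch = 21.3, 19.0               # current vs desired magnitude
    flux_norm = np.power(10.0, -0.4 * (magmatch - curmag))
    # multiply_flux_norm(flux_norm) then scales fnu so calc_mag -> magmatch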
if gapval == 0:
            raise Exception(
                "Original flambda is 0 at the desired point of normalization. " "Cannot renormalize."
@@ -1474,7 +1496,8 @@ def renormalize_sed(
            else:
                fnu = numpy.copy(fnu)
            # Calculate renormalization values.
-            # Check that flambda is defined at the wavelength want to use for renormalization.
+            # Check that flambda is defined at the wavelength we want to use
+            # for renormalization.
            if (lambdanorm > wavelen.max()) or (lambdanorm < wavelen.min()):
                raise Exception(
                    "Desired wavelength for renormalization, %f, " % (lambdanorm)
@@ -1552,14 +1575,18 @@ def write_sed(
        f.close()
        return

-    # Bonus, functions for many-magnitude calculation for many SEDs with a single bandpass
+    # Bonus, functions for many-magnitude calculation for many SEDs with
+    # a single bandpass

    def setup_phi_array(self, bandpasslist):
        """
-        Sets up a 2-d numpy phi array from bandpasslist suitable for input to Sed's many_mag_calc.
+        Sets up a 2-d numpy phi array from bandpasslist suitable for input
+        to Sed's many_mag_calc.

-        This is intended to be used once, most likely before using Sed's many_mag_calc many times on many SEDs.
-        Returns 2-d phi array and the wavelen_step (dlambda) appropriate for that array.
+        This is intended to be used once, most likely before using Sed's
+        many_mag_calc many times on many SEDs.
+        Returns 2-d phi array and the wavelen_step (dlambda) appropriate for
+        that array.
        """
        # Calculate dlambda for phi array.
        wavelen_step = bandpasslist[0].wavelen[1] - bandpasslist[0].wavelen[0]
@@ -1598,12 +1625,12 @@ def many_flux_calc(self, phiarray, wavelen_step, observed_bandpass_ind=None):

        Parameters
        ----------
-        phiarray: `np.ndarray`, mandatory
+        phiarray : `np.ndarray`
            phiarray corresponding to the list of bandpasses in which the band
-            fluxes need to be calculated, in the same wavelength grid as the SED
-        wavelen_step: `float`, mandatory
+            fluxes need to be calculated, in the same wavelength grid as Sed
+        wavelen_step : `float`
            the uniform grid size of the SED
-        observed_bandpass_ind: list of integers, optional, defaults to None
+        observed_bandpass_ind : `list` [`int`], optional
            list of indices of phiarray corresponding to observed bandpasses,
            if None, the original phiarray is returned

@@ -1612,22 +1639,22 @@ def many_flux_calc(self, phiarray, wavelen_step, observed_bandpass_ind=None):
        `np.ndarray` with size equal to number of bandpass filters
            band flux values in units of ergs/cm^2/sec

-        .. note: Sed.many_flux_calc `assumes` phiArray has the same wavelenghth
+        .. note: Sed.many_flux_calc `assumes` phiArray has the same wavelength
        grid as the Sed and that `sed.fnu` has been calculated for the sed,
        perhaps using `sed.flambda_tofnu()`. This requires calling
        `sed.setupPhiArray()` first. These assumptions are to avoid error
        checking within this function (for speed), but could lead to errors if
        method is used incorrectly.

-        Note on units: Fluxes calculated this way will be the flux density integrated over the
-        weighted response curve of the bandpass. See equaiton 2.1 of the LSST Science Book
+        Note on units: Fluxes calculated this way will be the flux density
+        integrated over the weighted response curve of the bandpass.
+        See equation 2.1 of the LSST Science Book
+        http://www.lsst.org/scientists/scibook
        """
        if observed_bandpass_ind is not None:
            phiarray = phiarray[observed_bandpass_ind]
-        flux = numpy.empty(len(phiarray), dtype="float")
        flux = numpy.sum(phiarray * self.fnu, axis=1) * wavelen_step
        return flux

@@ -1635,22 +1662,23 @@ def many_mag_calc(self, phiarray, wavelen_step, observed_bandpass_ind=None):
        """
        Calculate many magnitudes for many bandpasses using a single sed.

-        This method assumes that there will be flux within a particular bandpass
+        This method assumes that there will be flux within a particular
+        bandpass
        (could return '-Inf' for a magnitude if there is none). Use
        setupPhiArray first, and note that Sed.many_mag_calc *assumes*
        phiArray has the same wavelength grid as the Sed, and that fnu has
        already been calculated for Sed.
-        These assumptions are to avoid error checking within this function (for
-        speed), but could lead to errors if method is used incorrectly.
+        These assumptions are to avoid error checking within this function
+        (for speed), but could lead to errors if method is used incorrectly.

        Parameters
        ----------
-        phiarray: `np.ndarray`, mandatory
+        phiarray : `np.ndarray`
            phiarray corresponding to the list of bandpasses in which the band
-            fluxes need to be calculated, in the same wavelength grid as the SED
-        wavelen_step: `float`, mandatory
+            fluxes need to be calculated, in the same wavelength grid as SED
+        wavelen_step : `float`
            the uniform grid size of the SED
-        observed_bandpass_ind: list of integers, optional, defaults to None
+        observed_bandpass_ind : `list` [`int`], optional
            list of indices of phiarray corresponding to observed bandpasses,
            if None, the original phiarray is returned

@@ -1662,23 +1690,26 @@ def many_mag_calc(self, phiarray, wavelen_step, observed_bandpass_ind=None):

def read_close__kurucz(teff, fe_h, logg):
    """
-    Check the cached Kurucz models and load the model closest to the input stellar parameters.
+    Check the cached Kurucz models and load the model closest to the
+    input stellar parameters.
    Parameters are matched in order of Teff, fe_h, and logg.

    Parameters
    ----------
-    teff : float
-        Effective temperature of the stellar template. Reasonable range is 3830-11,100 K.
-    fe_h : float
+    teff : `float`
+        Effective temperature of the stellar template.
+        Reasonable range is 3830-11,100 K.
+    fe_h : `float`
        Metallicity [Fe/H] of stellar template. Values in range -5 to 1.
-    logg : float
-        Log of the surface gravity for the stellar template. Values in range 0. to 50.
+    logg : `float`
+        Log of the surface gravity for the stellar template.
+        Values in range 0. to 50.

    Returns
    -------
-    sed : Sed Object
+    sed : `rubin_sim.phot_utils.Sed`
        The SED of the closest matching stellar template
-    paramDict : dict
+    paramDict : `dict`
        Dictionary of the teff, fe_h, logg that were actually loaded
    """

diff --git a/rubin_sim/phot_utils/signaltonoise.py b/rubin_sim/phot_utils/signaltonoise.py
index 807cf6e03..706f9a52d 100644
--- a/rubin_sim/phot_utils/signaltonoise.py
+++ b/rubin_sim/phot_utils/signaltonoise.py
@@ -13,6 +13,7 @@
    "mag_error_from_snr",
    "calc_mag_error_m5",
    "calc_mag_error_sed",
+    "scale_sky_m5",
)

import numpy
@@ -100,7 +101,7 @@ def calc_instr_noise_sq(phot_params):

    Parameters
    ----------
-    phot_params : `PhotometricParameters`
+    phot_params : `rubin_sim.phot_utils.PhotometricParameters`
        A PhotometricParameters object that carries details about the
        photometric response of the telescope.
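The mag_error_from_snr entry in __all__ above encodes the standard small-error relation between signal-to-noise and magnitude uncertainty, shown here as plain arithmetic rather than a call into the module:

    import numpy as np

    snr = 20.0
    mag_err = 2.5 / (np.log(10.0) * snr)   # ~1.086 / SNR, here ~0.054 mag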
@@ -128,14 +129,14 @@ def calc_total_non_source_noise_sq(sky_sed, hardwarebandpass, phot_params, fwhm_ Parameters ---------- - sky_sed : `Sed` + sky_sed : `rubin_sim.phot_utils.Sed` A Sed object representing the sky (normalized so that sky_sed.calc_mag() gives the sky brightness in magnitudes per square arcsecond) - hardwarebandpass : `Bandpass` + hardwarebandpass : `rubin_sim.phot_utils.Bandpass` A Bandpass object containing just the instrumentation throughputs (no atmosphere) - phot_params : `PhotometricParameters` + phot_params : `rubin_sim.phot_utils.PhotometricParameters` A PhotometricParameters object containing information about the photometric properties of the telescope. fwhm_eff : `float` @@ -189,10 +190,10 @@ def calc_sky_counts_per_pixel_for_m5(m5target, total_bandpass, phot_params, fwhm ---------- m5target : `float` The desired value of m5. - total_bandpass : `Bandpass` + total_bandpass : `rubin_sim.phot_utils.Bandpass` A bandpass object representing the total throughput of the telescope (instrumentation plus atmosphere). - phot_params : `PhotometricParameters` + phot_params : `rubin_sim.phot_utils.PhotometricParameters` A photometric parameters object containing the photometric response information for Rubin. fwhm_eff : `float` @@ -247,21 +248,21 @@ def calc_sky_counts_per_pixel_for_m5(m5target, total_bandpass, phot_params, fwhm def calc_m5(skysed, total_bandpass, hardware, phot_params, fwhm_eff=0.83): - """Calculate the AB magnitude of a 5-sigma source above sky background source. + """Calculate the AB magnitude of a 5-sigma source above sky background. Parameters ---------- - skysed : `Sed` + skysed : `rubin_sim.phot_utils.Sed` An SED representing the sky background emission, normalized such that skysed.calc_mag(Bandpass) returns the expected sky brightness in magnitudes per sq arcsecond. - total_bandpass : `Bandpass` + total_bandpass : `rubin_sim.phot_utils.Bandpass` The Bandpass representing the total throughput of the telescope (instrument plus atmosphere). - hardware : `Bandpass` + hardware : `rubin_sim.phot_utils.Bandpass` The Bandpass representing the throughput of the telescope instrument only (no atmosphere). - phot_params : `PhotometricParameters` + phot_params : `rubin_sim.phot_utils.PhotometricParameters` The PhotometricParameters class that carries details about the photometric response of the telescope. fwhm_eff : `float` @@ -396,12 +397,12 @@ def calc_snr_m5(magnitude, bandpass, m5, phot_params, gamma=None): ---------- magnitude : `float` or `np.ndarray`, (N,) Magnitudes of the sources whose signal to noise you are calculating. - bandpass : `Bandpass` + bandpass : `rubin_sim.phot_utils.Bandpass` The Bandpass in which the magnitude was calculated (total instrument + atmosphere). m5 : `float` The 5-sigma point source limiting magnitude of the exposure. - phot_params : `PhotometricParameters` + phot_params : `rubin_sim.phot_utils.PhotometricParameters` The PhotometricParameters class that carries details about the photometric response of the telescope. gamma : `float`, opt @@ -438,12 +439,12 @@ def calc_mag_error_m5(magnitude, bandpass, m5, phot_params, gamma=None): ---------- magnitude : `float` Magnitude of the source. - bandpass : `Bandpass` + bandpass : `rubin_sim.phot_utils.Bandpass` The Bandpass in which to calculate the magnitude error (total instrument + atmosphere). m5 : `float` The 5-sigma point source limiting magnitude. 
- phot_params : `PhotometricParameters` + phot_params : `rubin_sim.phot_utils.PhotometricParameters` The PhotometricParameters class that carries details about the photometric response of the telescope. gamma : `float`, optional @@ -486,20 +487,20 @@ def calc_snr_sed( Parameters ---------- - source_sed : `Sed` + source_sed : `rubin_sim.phot_utils.Sed` A SED representing the source, normalized such that source_sed.calc_mag gives the desired magnitude. - total_bandpass : `Bandpass` + total_bandpass : `rubin_sim.phot_utils.Bandpass` The Bandpass representing the total throughput of the telescope (instrument plus atmosphere). - sky_sed : `Sed` + sky_sed : `rubin_sim.phot_utils.Sed` A SED representing the sky background emission, normalized such that skysed.calc_mag(Bandpass) returns the expected sky brightness in magnitudes per sq arcsecond. - hardware : `Bandpass` + hardware : `rubin_sim.phot_utils.Bandpass` The Bandpass representing the throughput of the telescope instrument only (no atmosphere). - phot_params : `PhotometricParameters` + phot_params : `rubin_sim.phot_utils.PhotometricParameters` The PhotometricParameters class that carries details about the photometric response of the telescope. fwhm_eff : `float` @@ -562,20 +563,20 @@ def calc_mag_error_sed( Parameters ---------- - source_sed : `Sed` + source_sed : `rubin_sim.phot_utils.Sed` A SED representing the source, normalized such that source_sed.calc_mag gives the desired magnitude. - total_bandpass : `Bandpass` + total_bandpass : `rubin_sim.phot_utils.Bandpass` The Bandpass representing the total throughput of the telescope (instrument plus atmosphere). - sky_sed : `Sed` + sky_sed : `rubin_sim.phot_utils.Sed` A SED representing the sky background emission, normalized such that skysed.calc_mag(Bandpass) returns the expected sky brightness in magnitudes per sq arcsecond. - hardware_bandpass : `Bandpass` + hardware_bandpass : `rubin_sim.phot_utils.Bandpass` The Bandpass representing the throughput of the telescope instrument only (no atmosphere). - phot_params : `PhotometricParameters` + phot_params : `rubin_sim.phot_utils.PhotometricParameters` The PhotometricParameters class that carries details about the photometric response of the telescope. fwhm_eff : `float` @@ -635,15 +636,18 @@ def calc_astrometric_error(mag, m5, fwhm_geom=0.7, nvisit=1, systematic_floor=10 astrom_err : `float` Astrometric error for a given SNR, in mas. """ - # The astrometric error can be applied to parallax or proper motion (for n_visit>1). - # If applying to proper motion, should also divide by the # of years of the survey. + # The astrometric error can be applied to parallax or proper motion + # (for n_visit>1). + # If applying to proper motion, should also divide by the # of years + # of the survey. # This is also referenced in the astroph/0805.2366 paper. - # D. Monet suggests sqrt(Nvisit/2) for first 3 years, sqrt(N) for longer, in reduction of error - # because of the astrometric measurement method, the systematic and random error are both reduced. + # D. Monet suggests sqrt(Nvisit/2) for first 3 years, sqrt(N) for longer, + # in reduction of error. # Zeljko says 'be conservative', so removing this reduction for now. rgamma = 0.039 xval = numpy.power(10, 0.4 * (mag - m5)) - # The average fwhm_eff is 0.7" (or 700 mas), but user can specify. Convert to mas. + # The average fwhm_eff is 0.7" (or 700 mas), but user can specify. + # Convert to mas. 
seeing = fwhm_geom * 1000.0 error_rand = seeing * numpy.sqrt((0.04 - rgamma) * xval + rgamma * xval * xval) error_rand = error_rand / numpy.sqrt(nvisit) @@ -651,3 +655,41 @@ def calc_astrometric_error(mag, m5, fwhm_geom=0.7, nvisit=1, systematic_floor=10 error_sys = systematic_floor astrom_error = numpy.sqrt(error_sys * error_sys + error_rand * error_rand) return astrom_error + + +def scale_sky_m5(m5target, skysed, total_bandpass, hardware, phot_params, fwhm_eff=0.83): + """ + Take an SED representing the sky and normalize it so that + m5 (the magnitude at which an object is detected in this + bandpass at 5-sigma) is set to some specified value. + + The 5-sigma limiting magnitude (m5) for an observation is + determined by a combination of the telescope and camera parameters + (such as diameter of the mirrors and the readnoise) together with the + sky background. This function (scale_sky_m5) scales a provided sky + background Sed so that an observation would have a target m5 value, + for the provided hardware parameters. Using the resulting Sed in the + calc_m5 function will return this target value for m5. + + Note that the returned SED will be renormalized such that calling the + method calc_adu(hardware) on it will yield the number of + counts per square arcsecond in a given bandpass. + """ + + # This is based on the LSST SNR document (v1.2, May 2010) + # www.astro.washington.edu/users/ivezic/Astr511/LSST_SNRdoc.pdf + + sky_counts_target = calc_sky_counts_per_pixel_for_m5( + m5target, total_bandpass, fwhm_eff=fwhm_eff, phot_params=phot_params + ) + + sky_sed_out = Sed(wavelen=numpy.copy(skysed.wavelen), flambda=numpy.copy(skysed.flambda)) + + sky_counts = ( + sky_sed_out.calc_adu(hardware, phot_params=phot_params) + * phot_params.platescale + * phot_params.platescale + ) + sky_sed_out.multiply_flux_norm(sky_counts_target / sky_counts) + + return sky_sed_out \ No newline at end of file diff --git a/rubin_sim/phot_utils/utils/__init__.py b/rubin_sim/phot_utils/utils/__init__.py deleted file mode 100644 index 865d0d8d3..000000000 --- a/rubin_sim/phot_utils/utils/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .test_utils import * diff --git a/rubin_sim/phot_utils/utils/test_utils.py b/rubin_sim/phot_utils/utils/test_utils.py deleted file mode 100644 index b242da9d9..000000000 --- a/rubin_sim/phot_utils/utils/test_utils.py +++ /dev/null @@ -1,255 +0,0 @@ -""" -This file defines some test catalog and DBObject classes for use with unit tests. -To date (30 October 2014) testPhotometry.py and testCosmology.py import from this module -""" -__all__ = ("set_m5", "comoving_distance_integrand", "cosmological_omega") -import numpy -from rubin_sim.phot_utils import Sed, calc_sky_counts_per_pixel_for_m5 -def set_m5(m5target, skysed, total_bandpass, hardware, phot_params, fwhm_eff=0.83): - """ - Take an SED representing the sky and normalize it so that - m5 (the magnitude at which an object is detected in this - bandpass at 5-sigma) is set to some specified value. - - The 5-sigma limiting magnitude (m5) for an observation is - determined by a combination of the telescope and camera parameters - (such as diameter of the mirrors and the readnoise) together with the - sky background. This method (set_m5) scales a provided sky background - Sed so that an observation would have a target m5 value, for the - provided hardware parameters. Using the resulting Sed in the - 'calcM5' method will return this target value for m5.
- - Parameters - ---------- - the : `Unknown` - desired value of m5 - - Parameters - ---------- - skysed : `Unknown` - is an instantiation of the Sed class representing - sky emission - - Parameters - ---------- - total_bandpass : `Unknown` - is an instantiation of the Bandpass class - representing the total throughput of the telescope (instrumentation - plus atmosphere) - - Parameters - ---------- - hardware : `Unknown` - is an instantiation of the Bandpass class representing - the throughput due solely to instrumentation. - - Parameters - ---------- - phot_params : `Unknown` - is an instantiation of the - PhotometricParameters class that carries details about the - photometric response of the telescope. - - Parameters - ---------- - fwhm_eff : `Unknown` - in arcseconds - - Parameters - ---------- - returns : `Unknown` - an instantiation of the Sed class that is the skysed renormalized - so that m5 has the desired value. - - Note that the returned SED will be renormalized such that calling the method - self.calcADU(hardwareBandpass) on it will yield the number of counts per square - arcsecond in a given bandpass. - """ - - # This is based on the LSST SNR document (v1.2, May 2010) - # www.astro.washington.edu/users/ivezic/Astr511/LSST_SNRdoc.pdf - - sky_counts_target = calc_sky_counts_per_pixel_for_m5( - m5target, total_bandpass, fwhm_eff=fwhm_eff, phot_params=phot_params - ) - - sky_sed_out = Sed(wavelen=numpy.copy(skysed.wavelen), flambda=numpy.copy(skysed.flambda)) - - sky_counts = ( - sky_sed_out.calc_adu(hardware, phot_params=phot_params) - * phot_params.platescale - * phot_params.platescale - ) - sky_sed_out.multiply_flux_norm(sky_counts_target / sky_counts) - - return sky_sed_out - - -def cosmological_omega(redshift, h0, om0, ode0=None, og0=0.0, onu0=0.0, w0=-1.0, wa=0.0): - """ - A method to compute the evolution of the Hubble and density parameters - with redshift (as a baseline against which to test the cosmology unittest) - - Parameters - ---------- - redshift : `Unknown` - is the redshift at which the output is desired - - Parameters - ---------- - h0 : `Unknown` - is the Hubble parameter at the present epoch in km/s/Mpc - - Parameters - ---------- - om0 : `Unknown` - is the density parameter (fraction of critical) for matter at the - present epoch - - Parameters - ---------- - ode0 : `Unknown` - is the density parameter for Dark Energy at the present epoch. - If left as None, will be set to 1.0-om0-og0-onu0 (i.e. 
a flat universe) - - Parameters - ---------- - og0 : `Unknown` - is the density parameter for photons at the present epoch - - Parameters - ---------- - onu0 : `Unknown` - is the density parameter for neutrinos at the present epoch - (assume massless neutrinos) - - Parameters - ---------- - w0 : `Unknown` - is a parameter for calculating the equation of state for Dark Energy - w = w0 + wa * z/(1 + z) - - Parameters - ---------- - wa : `Unknown` - is the other parameter for calculating the equation of state for Dark - Energy - - Returns - ------- - Unknown: `Unknown` - Hubble parameter at desired redshift (in km/s/Mpc) - - Returns - ------- - Unknown: `Unknown` - matter density Parameter at desired redshift - - Returns - ------- - Unknown: `Unknown` - Dark Energy density parameter at desired redshift - - Returns - ------- - Unknown: `Unknown` - photon density parameter at desired redshift - - Returns - ------- - Unknown: `Unknown` - neutrino density parameter at desired redshift - - Returns - ------- - Unknown: `Unknown` - curvature density parameter at desired redshift - """ - - if ode0 is None: - ode0 = 1.0 - om0 - og0 - onu0 - - ok0 = 1.0 - om0 - ode0 - og0 - onu0 - - aa = 1.0 / (1.0 + redshift) - omz = om0 * numpy.power(1.0 + redshift, 3) - ogz = og0 * numpy.power(1.0 + redshift, 4) - onuz = onu0 * numpy.power(1.0 + redshift, 4) - okz = ok0 * numpy.power(1.0 + redshift, 2) - odez = ode0 * numpy.exp(-3.0 * (numpy.log(aa) * (w0 + wa + 1.0) - wa * (aa - 1.0))) - - ototal = omz + ogz + onuz + odez + okz - - return ( - h0 * numpy.sqrt(ototal), - omz / ototal, - odez / ototal, - ogz / ototal, - onuz / ototal, - okz / ototal, - ) - - -def comoving_distance_integrand(redshift, h0, om0, ode0, og0, onu0, w0, wa): - """ - The integrand of comoving distance (as a baseline for cosmology unittest) - - Parameters - ---------- - redshift : `Unknown` - is the redshift at which to evaluate the integrand - - Parameters - ---------- - h0 : `Unknown` - is the Hubble parameter at the present epoch in km/s/Mpc - - Parameters - ---------- - om0 : `Unknown` - is the density parameter (fraction of critical) for matter at the - present epoch - - Parameters - ---------- - ode0 : `Unknown` - is the density parameter for Dark Energy at the present epoch. 
- - Parameters - ---------- - og0 : `Unknown` - is the density parameter for photons at the present epoch - - Parameters - ---------- - onu0 : `Unknown` - is the density parameter for neutrinos at the present epoch - (assume massless neutrinos) - - Parameters - ---------- - w0 : `Unknown` - is a parameter for calculating the equation of state for Dark Energy - w = w0 + wa * z/(1 + z) - - Parameters - ---------- - wa : `Unknown` - is the other parameter for calculating the equation of state for Dark - Energy - - Returns - ------- - Unknown: `Unknown` - 1/(Hubble parameter at desired redshift in km/s/Mpc) - - """ - hh, mm, de, gg, nn, kk = cosmological_omega( - redshift, h0, om0, ode0=ode0, og0=og0, onu0=onu0, w0=w0, wa=wa - ) - return 1.0 / hh diff --git a/rubin_sim/satellite_constellations/model_observatory.py b/rubin_sim/satellite_constellations/model_observatory.py index 6e8b4ebca..1a54c70cc 100644 --- a/rubin_sim/satellite_constellations/model_observatory.py +++ b/rubin_sim/satellite_constellations/model_observatory.py @@ -1,7 +1,7 @@ __all__ = ("ModelObservatory",) import numpy as np -from rubin_scheduler.scheduler.model_observatory import ModelObservatory as OMO +from rubin_scheduler.scheduler.model_observatory import ModelObservatory as oMO from rubin_scheduler.site_models import Almanac from rubin_scheduler.utils import _healbin, survey_start_mjd @@ -9,7 +9,7 @@ # subclass to expand to include satellite constellations -class ModelObservatory(OMO): +class ModelObservatory(oMO): """A class to generate a realistic telemetry stream for the scheduler Parameters diff --git a/rubin_sim/satellite_constellations/sat_utils.py b/rubin_sim/satellite_constellations/sat_utils.py index 7e288d66f..aed47d425 100644 --- a/rubin_sim/satellite_constellations/sat_utils.py +++ b/rubin_sim/satellite_constellations/sat_utils.py @@ -27,7 +27,7 @@ def sun_alt_limits(): Returns ------- - sun_alt_limits : `dict` {`str`: `float`} + sun_alt_limits : `dict` [`str`, `float`] Dict with satellite constellation name keys, altitude limits values (degrees). @@ -81,8 +81,9 @@ def tle_from_orbital_parameters(sat_name, sat_nr, epoch, inclination, raan, mean Notes ----- - epoch has the format: first two digits are the year, next three - digits are the day from beginning of year, then fraction of a day is given, e.g. + epoch has the format: first two digits are the year, + next three digits are the day from beginning of year, + then fraction of a day is given, e.g. 20180.25 would be 2020, day 180, 6 hours (UT?) """ @@ -130,13 +131,13 @@ def create_constellation( Parameters ---------- - altitudes : `np.ndarray` + altitudes : `np.ndarray`, (N,) Altitudes (degrees). - inclinations : `np.ndarray` + inclinations : `np.ndarray`, (N,) Inclinations (degrees). - nplanes : `np.ndarray` + nplanes : `np.ndarray`, (N,) Number of satellite planes. - sats_per_plane : `np.ndarray` + sats_per_plane : `np.ndarray`, (N,) Number of satellites per orbital plane. epoch : `float` Epoch.
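As an illustration of the epoch convention and the per-shell arrays documented above, a hedged sketch (it assumes the positional argument order shown in the create_constellation docstring, with any remaining keyword arguments left at their defaults):

    import numpy as np
    from astropy import units as u
    from rubin_sim.satellite_constellations.sat_utils import create_constellation

    # A single shell: 550 km altitude, 53 deg inclination,
    # 72 orbital planes with 22 satellites each.
    altitudes = np.array([550]) * u.km
    inclinations = np.array([53]) * u.deg
    nplanes = np.array([72])
    sats_per_plane = np.array([22])

    # epoch 23274.25 = year 2023, day 274, 6 hours (per the format above).
    sat_tles = create_constellation(altitudes, inclinations, nplanes, sats_per_plane, epoch=23274.25)

The resulting TLE strings can be passed to the Constellation class shown below.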
@@ -182,7 +183,7 @@ def starlink_tles_v1(): Returns ------- - my_sat_tles : `list` of `str` + my_sat_tles : `list` [`str`] """ altitudes = np.array([550, 540, 570, 560, 560]) * u.km inclinations = np.array([53, 53.2, 70, 97.6, 97.6]) * u.deg @@ -200,7 +201,7 @@ def starlink_tles_v2(): Returns ------- - my_sat_tles : `list` of `str` + my_sat_tles : `list` [`str`] """ altitudes = np.array([340, 345, 350, 360, 525, 530, 535, 604, 614]) * u.km inclinations = np.array([53, 46, 38, 96.9, 53, 43, 33, 148, 115.7]) * u.deg @@ -218,7 +219,7 @@ def oneweb_tles(): Returns ------- - my_sat_tles : `list` of `str` + my_sat_tles : `list` [`str`] """ altitudes = np.array([1200, 1200, 1200]) * u.km inclinations = np.array([87.9, 40, 55]) * u.deg @@ -236,7 +237,7 @@ class Constellation: Parameters ---------- - sat_tle_list : `list` of `str` + sat_tle_list : `list` [`str`] A list of satellite TLEs to be used alt_limit : `float` Altitude limit below which satellites can be ignored (degrees) @@ -267,6 +268,11 @@ def _make_location(self): def update_mjd(self, mjd): """Calculate and record the alt/az position and illumination status for all the satellites at a given time. + + Parameters + ---------- + mjd : `float` + New MJD. """ jd = mjd + MJDOFFSET t = self.ts.ut1_jd(jd) @@ -300,18 +306,18 @@ def paths_array(self, mjds): Parameters ---------- - mjds : `np.ndarray` + mjds : `np.ndarray`, (N,) Modified Julian Dates. Returns ------- - ras : `np.ndarray` + ras : `np.ndarray`, (N,) RAs at each MJD - decs : `np.ndarray` + decs : `np.ndarray`, (N,) Decs at each MJD - alts : `np.ndarray` + alts : `np.ndarray`, (N,) Altitudes at each MJD - illums : `np.ndarray` + illums : `np.ndarray`, (N,) Array of bools for if satellite is illuminated """ @@ -348,13 +354,13 @@ def check_pointings( Parameters ---------- - pointing_ras : `np.ndarray` + pointing_ras : `np.ndarray`, (N,) The RA for each pointing (degrees). - pointing_decs : `np.ndarray`` + pointing_decs : `np.ndarray`, (N,) The dec for each pointing (degrees). - mjds : `np.ndarray` + mjds : `np.ndarray`, (N,) The MJD for the (start) of each pointing (days). - visit_time : `np.ndarray` + visit_time : `np.ndarray`, (N,) The start to end time for a visit (seconds). fov_radius : `float` The radius of the science field of view (degrees) @@ -457,9 +463,9 @@ def _streak_length(sat_ras, sat_decs, pointing_ra, pointing_dec, radius): Parameters ---------- - sat_ras : `np.ndarray` + sat_ras : `np.ndarray`, (N,) RA for each satellite (radians). - sat_decs : `np.ndarray` + sat_decs : `np.ndarray`, (N,) Decs for the satelltes (radians). pointing_ra : `float` RA of the pointing (radians). 
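And a corresponding usage sketch for the Constellation class above (illustrative: check_pointings' return values are not shown in these hunks, so the result is left opaque here; the 30 s visit time and 1.75 degree field-of-view radius are example values):

    import numpy as np
    from rubin_sim.satellite_constellations.sat_utils import Constellation, starlink_tles_v2

    constellation = Constellation(starlink_tles_v2())

    # Three pointings (degrees) at three MJDs, each a 30 second visit.
    pointing_ras = np.array([10.0, 20.0, 30.0])
    pointing_decs = np.array([-20.0, -25.0, -30.0])
    mjds = np.array([60218.20, 60218.21, 60218.22])
    visit_time = np.full(mjds.size, 30.0)

    streak_info = constellation.check_pointings(pointing_ras, pointing_decs, mjds, visit_time, fov_radius=1.75)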
diff --git a/rubin_sim/scheduler/__init__.py b/rubin_sim/scheduler/__init__.py index 38a636501..38f78be99 100644 --- a/rubin_sim/scheduler/__init__.py +++ b/rubin_sim/scheduler/__init__.py @@ -1,6 +1,6 @@ import warnings -from rubin_scheduler.scheduler import * +from rubin_scheduler.scheduler import * #noqa: F403 warnings.simplefilter("default") warnings.warn("rubin_sim.scheduler is deprecated, switch to rubin_scheduler.scheduler", DeprecationWarning) diff --git a/rubin_sim/utils/__init__.py b/rubin_sim/utils/__init__.py index 6ce84a18f..783608b51 100644 --- a/rubin_sim/utils/__init__.py +++ b/rubin_sim/utils/__init__.py @@ -1,6 +1,6 @@ import warnings -from rubin_scheduler.utils import * +from rubin_scheduler.utils import * #noqa: F403 warnings.simplefilter("default") warnings.warn("rubin_sim.utils is deprecated, switch to rubin_scheduler.utils", DeprecationWarning) diff --git a/tests/phot_utils/test_snr.py b/tests/phot_utils/test_snr.py index 02667d03b..aa2cde532 100644 --- a/tests/phot_utils/test_snr.py +++ b/tests/phot_utils/test_snr.py @@ -6,7 +6,7 @@ import rubin_sim.phot_utils.signaltonoise as snr from rubin_sim.phot_utils import Bandpass, PhotometricParameters, Sed -from rubin_sim.phot_utils.utils import set_m5 +from rubin_sim.phot_utils import scale_sky_m5 class TestSNRmethods(unittest.TestCase): @@ -201,7 +201,7 @@ def test_systematic_uncertainty(self): sky_dummy = Sed() sky_dummy.read_sed_flambda(os.path.join(get_data_dir(), "throughputs", "baseline", "darksky.dat")) - normalized_sky_dummy = set_m5( + normalized_sky_dummy = scale_sky_m5( m5, sky_dummy, bp, @@ -259,7 +259,7 @@ def test_no_systematic_uncertainty(self): sky_dummy = Sed() sky_dummy.read_sed_flambda(os.path.join(get_data_dir(), "throughputs", "baseline", "darksky.dat")) - normalized_sky_dummy = set_m5( + normalized_sky_dummy = scale_sky_m5( m5, sky_dummy, bp, From 32e2ac0aafc0f4292ca80c564c04c178043043ca Mon Sep 17 00:00:00 2001 From: Lynne Jones Date: Sat, 13 Jan 2024 19:14:12 -0800 Subject: [PATCH 06/26] Add site_models deprecation warning --- rubin_sim/site_models/__init__.py | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 rubin_sim/site_models/__init__.py diff --git a/rubin_sim/site_models/__init__.py b/rubin_sim/site_models/__init__.py new file mode 100644 index 000000000..a49d3efb3 --- /dev/null +++ b/rubin_sim/site_models/__init__.py @@ -0,0 +1,8 @@ +import warnings + +from rubin_scheduler.site_models import * # noqa: F403 + +warnings.simplefilter("default") +warnings.warn( + "rubin_sim.site_models is deprecated, switch to rubin_scheduler.site_models", DeprecationWarning +) From 6ade799621bc5be34d8c00b5fbd59ee41d0b4a44 Mon Sep 17 00:00:00 2001 From: Lynne Jones Date: Sat, 13 Jan 2024 20:35:22 -0800 Subject: [PATCH 07/26] Ruff and docstrings in skybrightness --- rubin_sim/skybrightness/__init__.py | 10 +- rubin_sim/skybrightness/allsky_db.py | 27 ++- rubin_sim/skybrightness/generate_hdf5.py | 84 ++++---- rubin_sim/skybrightness/interp_components.py | 168 +++++++++------ rubin_sim/skybrightness/sky_model.py | 202 +++++++++++-------- rubin_sim/skybrightness/twilight_func.py | 24 ++- rubin_sim/skybrightness/utils.py | 6 +- 7 files changed, 311 insertions(+), 210 deletions(-) diff --git a/rubin_sim/skybrightness/__init__.py b/rubin_sim/skybrightness/__init__.py index fd1248d6b..637b9806a 100644 --- a/rubin_sim/skybrightness/__init__.py +++ b/rubin_sim/skybrightness/__init__.py @@ -1,5 +1,5 @@ -from .allsky_db import * -from .interp_components import * -from .sky_model import * -from .twilight_func 
import * -from .utils import * +from .allsky_db import * # noqa: F403 +from .interp_components import * # noqa: F403 +from .sky_model import * # noqa: F403 +from .twilight_func import * # noqa: F403 +from .utils import * # noqa: F403 diff --git a/rubin_sim/skybrightness/allsky_db.py b/rubin_sim/skybrightness/allsky_db.py index 2d8d1e2bb..0fcc39871 100644 --- a/rubin_sim/skybrightness/allsky_db.py +++ b/rubin_sim/skybrightness/allsky_db.py @@ -6,13 +6,33 @@ import sqlalchemy as sqla from rubin_scheduler.data import get_data_dir -# Tools for using an all-sky sqlite DB with cannon and photodiode data from the site. +# Tools for using an all-sky sqlite DB with cannon +# and photodiode data from the site. def all_sky_db(date_id, sql_q=None, dtypes=None, db_address=None, filt="R"): """ - Take in a date_id (that corresponds to a single MJD, and - return the star and sky magnitudes in a numpy structured array. + Fetch star and sky magnitudes from a processed all-sky sqlite database. + + Parameters + ---------- + date_id : `float` + Date (MJD) to fetch star observation information from the database. + sql_q : `str` + Sql query to use. None will use a default query to get all star info. + dtypes : `list` [`str`, `dtype`] + Data types expected from the database. None will use the defaults. + db_address : `str` + Database data path. Default uses db in $RUBIN_SIM_DATA/skybrightness. + filt : `str` + Filter in which to fetch stellar observation data. + + Returns + ------- + data : `np.ndarray`, (N,) + Stellar observation data. + mjd : `float` + MJD of the observations. """ if db_address is None: data_path = os.path.join(get_data_dir(), "skybrightness") @@ -46,6 +66,7 @@ def all_sky_db(date_id, sql_q=None, dtypes=None, db_address=None, filt="R"): def diode_sky_db(mid_mjd, sql_q=None, dtypes=None, db_address=None, clean=True): + """Fetch diode measurements of skybrightness.""" if db_address is None: data_path = os.getenv("SIMS_SKYBRIGHTNESS_DATA_DIR") db_address = "sqlite:///" + os.path.join(data_path, "photometry", "skydata.sqlite") diff --git a/rubin_sim/skybrightness/generate_hdf5.py b/rubin_sim/skybrightness/generate_hdf5.py index caa1565b4..a2b9dd5a2 100644 --- a/rubin_sim/skybrightness/generate_hdf5.py +++ b/rubin_sim/skybrightness/generate_hdf5.py @@ -8,7 +8,7 @@ from astropy.time import Time import rubin_sim.skybrightness as sb -import rubin_sim.utils as utils +import rubin_scheduler.utils as utils def generate_sky( @@ -24,7 +24,6 @@ def generate_sky( dm=0.2, airmass_limit=2.5, alt_limit=86.5, - requireStride=3, verbose=True, ): """ @@ -32,51 +31,54 @@ def generate_sky( Parameters ---------- - mjd0 : float (9560.2) + mjd0 : `float` The starting MJD time - duration : float + duration : `float` The length of time to generate sky maps for (years) - timestep : float (5.) + timestep : `float` The timestep between sky maps (minutes) - timestep_max : float (20.) - The maximum alowable timestep (minutes) - outfile : str + timestep_max : `float` + The maximum allowable timestep (minutes) + outfile : `str` The name of the output file to save the results in - nside : in (32) - The nside to run the healpixel map at - sunLimit : float (-12) - The maximum altitude of the sun to try and generate maps for. MJDs with a higher - sun altitude are dropped - fieldID : bool (False) - If True, computes sky magnitudes at OpSim field locations. If False - computes at healpixel centers. 
- airmass_overhead : float - The airmass region to demand sky models are well matched before dropping - and assuming the timestep can be interpolated - dm : float - If a skymap can be interpolated from neighboring maps with precision dm, - that mjd is dropped. - airmass_limit : float - Pixels with an airmass greater than airmass_limit are masked + nside : `int` + The nside to create the healpixel maps. + Default of 32 matches expectation of rubin_scheduler. + sunLimit : `float` + The maximum altitude of the sun to try and generate maps for. + MJDs with a higher sun altitude are dropped. + fieldID : `bool` + If True, computes sky magnitudes at OpSim field locations. + If False computes at healpixel centers. + airmass_overhead : `float` + The airmass region to demand sky models are well matched before + dropping and assuming the timestep can be interpolated. + dm : `float` + If a skymap can be interpolated from neighboring maps with + precision dm, that mjd is dropped. + airmass_limit : `float` + Pixels with an airmass greater than airmass_limit are masked. moon_dist_limit : float - Pixels (fields) closer than moon_dist_limit (degrees) are masked - planet_dist_limit : float (2.) - Pixels (fields) closer than planet_dist_limit (degrees) to Venus, Mars, Jupiter, or Saturn are masked - alt_limit : float (86.5) - Altitude limit of the telescope (degrees). Altitudes higher than this are masked. - requireStride : int (3) - Require every nth mjd. Makes it possible to easily select an evenly spaced number states of a pixel. + Pixels (fields) closer than moon_dist_limit (degrees) are masked. + planet_dist_limit : `float` + Pixels (fields) closer than planet_dist_limit (degrees) to Venus, + Mars, Jupiter, or Saturn are masked. + alt_limit : `float` + Altitude limit of the telescope (degrees). + Altitudes higher than this are masked. Returns ------- - dict_of_lists : dict + dict_of_lists : `dict` includes key-value pairs: - mjds : the MJD at every computation. Not evenly spaced as no computations. + mjds : the MJD at every computation. Not necessarily evenly spaced. airmass : the airmass maps for each MJD - masks : The `bool` mask map for each MJD (True means the pixel should be masked) + masks : The `bool` mask map for each MJD + (True means the pixel should be masked) sunAlts : The sun altitude at each MJD - sky_brightness : dict - Has keys for each u,g,r,i,z,y filter. Each one is a 2-d array with dimensions of healpix ID and + sky_brightness : `dict` + Has keys for each u,g,r,i,z,y filter. + Each one is a 2-d array with dimensions of healpix ID and mjd (matched to the mjd list above). 
""" @@ -150,8 +152,8 @@ def generate_sky( if np.size(dict_of_lists["mjds"]) > 3: if dict_of_lists["mjds"][-2] not in required_mjds: - # Check if we can interpolate the second to last sky brightnesses - + # Check if we can interpolate the second to + # last sky brightnesses if dict_of_lists["mjds"][-1] - dict_of_lists["mjds"][-3] < timestep_max: can_interp = True for mjd2 in last_5_mjds: @@ -189,7 +191,7 @@ def generate_sky( version = rubin_sim.version.__version__ fingerprint = version - # Generate a header to save all the kwarg info for how this run was computed + # Generate a header to save all the kwarg info for this run header = { "mjd0": mjd0, "mjd_max": mjd_max, @@ -216,6 +218,7 @@ def generate_sky( hf.create_dataset("mjds", data=final_mjds) hf.create_dataset("sky_mags", data=final_sky_mags, compression="gzip") hf.create_dataset("timestep_max", data=timestep_max) + hf.attrs.update(header) hf.close() @@ -240,6 +243,7 @@ def generate_sky( count = 0 for mjd1, mjd2 in zip(mjds[:-1], mjds[1:]): print("Generating file %i" % count) - # generate_sky(mjd0=mjd1, mjd_max=mjd2+day_pad, outpath='opsimFields', fieldID=True) + # generate_sky(mjd0=mjd1, mjd_max=mjd2+day_pad, + # outpath='opsimFields', fieldID=True) generate_sky(mjd0=mjd1, mjd_max=mjd2 + day_pad) count += 1 diff --git a/rubin_sim/skybrightness/interp_components.py b/rubin_sim/skybrightness/interp_components.py index bdd7393ad..925d83085 100644 --- a/rubin_sim/skybrightness/interp_components.py +++ b/rubin_sim/skybrightness/interp_components.py @@ -36,8 +36,7 @@ def id2intid(ids): - """ - take an array of ids, and convert them to an integer id. + """take an array of ids, and convert them to an integer id. Handy if you want to put things into a sparse array. """ uids = np.unique(ids) @@ -55,9 +54,7 @@ def id2intid(ids): def intid2id(intids, uintids, uids, dtype=int): - """ - convert an int back to an id - """ + """convert an int back to an id""" ids = np.zeros(np.size(intids)) order = np.argsort(intids) @@ -150,15 +147,20 @@ def load_spec_files(filenames, mags=False): class BaseSingleInterp: - """ - Base class for sky components that only need to be interpolated on airmass + """Base class for interpolating sky components which only depend + on airmass. + + Parameters + ---------- + comp_name : `str`, optional + Component name. + sorted_order : `list` [`str`], optional + Order of the dimensions in the input .npz files. + mags : `bool`, optional + Return magnitudes (only) rather than the full spectrum. """ def __init__(self, comp_name=None, sorted_order=["airmass", "nightTimes"], mags=False): - """ - mags: Rather than the full spectrum, return the LSST ugrizy magnitudes. - """ - self.mags = mags data_dir = os.path.join(get_data_dir(), "skybrightness", "ESO_Spectra/" + comp_name) @@ -175,7 +177,7 @@ def __init__(self, comp_name=None, sorted_order=["airmass", "nightTimes"], mags= else: self.spec_size = 0 - # What order are the dimesions sorted by (from how + # What order are the dimensions sorted by (from how # the .npz was packaged) self.sorted_order = sorted_order self.dim_dict = {} @@ -187,21 +189,34 @@ def __init__(self, comp_name=None, sorted_order=["airmass", "nightTimes"], mags= # Set up and save the dict to order the filters once. self.filter_name_dict = {"u": 0, "g": 1, "r": 2, "i": 3, "z": 4, "y": 5} - def __call__(self, intep_points, filter_names=["u", "g", "r", "i", "z", "y"]): + def __call__(self, interp_points, filter_names=["u", "g", "r", "i", "z", "y"]): + """At `interp_points (e.g. 
airmass), return values.""" if self.mags: - return self.interp_mag(intep_points, filter_names=filter_names) + return self.interp_mag(interp_points, filter_names=filter_names) else: - return self.interp_spec(intep_points) + return self.interp_spec(interp_points) - def indx_and_weights(self, points, grid): + def _indx_and_weights(self, points, grid): - """ - for given 1-D points, find the grid points on - either side and return the weights assume grid is sorted + """For given 1-D points, find the grid points on + either side and return the weights. Assumes grid is sorted. + + Parameters + ---------- + points : `np.ndarray`, (N,) + The points on the grid to query. + grid : `np.ndarray`, (N,) + The grid on which to locate `points`. + + Returns + ------- + indx_r, indx_l : `np.ndarray`, `np.ndarray` + The grid indexes for each of the 1-d points + w_r, w_l : `np.ndarray`, `np.ndarray` + The weights for each of these grid points. """ order = np.argsort(points) - indx_l = np.empty(points.size, dtype=int) indx_r = np.empty(points.size, dtype=int) indx_r[order] = np.searchsorted(grid, points[order]) @@ -235,7 +250,7 @@ def _weighting(self, interp_points, values): (interp_points["airmass"] <= np.max(self.dim_dict["airmass"])) & (interp_points["airmass"] >= np.min(self.dim_dict["airmass"])) ) - indx_r, indx_l, w_r, w_l = self.indx_and_weights( + indx_r, indx_l, w_r, w_l = self._indx_and_weights( interp_points["airmass"][in_range], self.dim_dict["airmass"] ) @@ -266,36 +281,29 @@ def interp_mag(self, interp_points, filter_names=["u", "g", "r", "i", "z", "y"]) class ScatteredStar(BaseSingleInterp): - """ - Interpolate the spectra caused by scattered starlight. - """ + """Interpolate the spectra caused by scattered starlight.""" def __init__(self, comp_name="ScatteredStarLight", mags=False): super(ScatteredStar, self).__init__(comp_name=comp_name, mags=mags) class LowerAtm(BaseSingleInterp): - """ - Interpolate the spectra caused by the lower atmosphere. - """ + """Interpolate the spectra caused by the lower atmosphere.""" def __init__(self, comp_name="LowerAtm", mags=False): super(LowerAtm, self).__init__(comp_name=comp_name, mags=mags) class UpperAtm(BaseSingleInterp): - """ - Interpolate the spectra caused by the upper atmosphere. - """ + """Interpolate the spectra caused by the upper atmosphere.""" def __init__(self, comp_name="UpperAtm", mags=False): super(UpperAtm, self).__init__(comp_name=comp_name, mags=mags) class MergedSpec(BaseSingleInterp): - """ - Interpolate the spectra caused by the sum of the scattered - starlight, airglow, upper and lower atmosphere. + """Interpolate the combined spectra caused by the sum of the scattered + starlight, airglow, upper and lower atmosphere. """ def __init__(self, comp_name="MergedSpec", mags=False): @@ -303,9 +311,7 @@ class Airglow(BaseSingleInterp): - """ - Interpolate the spectra caused by airglow.
- """ + """Interpolate the spectra caused by airglow.""" def __init__(self, comp_name="Airglow", sorted_order=["airmass", "solarFlux"], mags=False): super(Airglow, self).__init__(comp_name=comp_name, mags=mags, sorted_order=sorted_order) @@ -321,11 +327,11 @@ def _weighting(self, interp_points, values): & (interp_points["solar_flux"] <= np.max(self.dim_dict["solarFlux"])) ) use_points = interp_points[in_range] - am_right_index, am_left_index, am_right_w, am_left_w = self.indx_and_weights( + am_right_index, am_left_index, am_right_w, am_left_w = self._indx_and_weights( use_points["airmass"], self.dim_dict["airmass"] ) - sf_right_index, sf_left_index, sf_right_w, sf_left_w = self.indx_and_weights( + sf_right_index, sf_left_index, sf_right_w, sf_left_w = self._indx_and_weights( use_points["solar_flux"], self.dim_dict["solarFlux"] ) @@ -338,23 +344,23 @@ def _weighting(self, interp_points, values): class TwilightInterp: - def __init__(self, mags=False, dark_sky_mags=None, fit_results=None): - """Read the Solar spectrum into a handy object and - compute mags in different filters - - Parameters - ---------- - mags : `bool` - If True, only return the LSST filter magnitudes, - otherwise return the full spectrum - dark_sky_mags : dict - Dict of the zenith dark sky values to be assumed. - The twilight fits are done relative to the dark sky level. - fit_results : dict - Dict of twilight parameters based on twilight_func. - Keys should be filter names. - """ + """Use the Solar Spectrum to provide an interpolated spectra or magnitudes + for the twilight sky. + + Parameters + ---------- + mags : `bool` + If True, only return the LSST filter magnitudes, + otherwise return the full spectrum + dark_sky_mags : `dict` + Dict of the zenith dark sky values to be assumed. + The twilight fits are done relative to the dark sky level. + fit_results : `dict` + Dict of twilight parameters based on twilight_func. + Keys should be filter names. + """ + def __init__(self, mags=False, dark_sky_mags=None, fit_results=None): if dark_sky_mags is None: dark_sky_mags = { "u": 22.8, @@ -498,8 +504,7 @@ def __init__(self, mags=False, dark_sky_mags=None, fit_results=None): # away with computing the magnitudes in the __call__ each time. 
if mags: # Load up the LSST filters and convert the - # solarSpec.flabda and solarSpec.wavelen to fluxes - through_path = through_path = os.path.join(get_data_dir(), "throughputs", "baseline") + # solarSpec.flambda and solarSpec.wavelen to fluxes self.lsst_filter_names = ["u", "g", "r", "i", "z", "y"] self.lsst_equations = np.zeros( (np.size(self.lsst_filter_names), np.size(self.fit_results["B"])), @@ -511,6 +516,7 @@ def __init__(self, mags=False, dark_sky_mags=None, fit_results=None): for i, fn in enumerate(self.filter_names): fits[i, :] = self.fit_results[fn] + through_path = os.path.join(get_data_dir(), "throughputs", "baseline") for filtername in self.lsst_filter_names: bp = np.loadtxt( os.path.join(through_path, "total_" + filtername + ".dat"), @@ -531,11 +537,13 @@ def __init__(self, mags=False, dark_sky_mags=None, fit_results=None): self.filter_name_dict = {"u": 0, "g": 1, "r": 2, "i": 3, "z": 4, "y": 5} def print_fits_used(self): - """ - Print out the fit parameters being used - """ + """Print out the fit parameters being used""" print( - "\\tablehead{\colhead{Filter} & \colhead{$r_{12/z}$} & \colhead{$a$ (1/radians)} & \colhead{$b$ (1/airmass)} & \colhead{$c$ (az term/airmass)} & \colhead{$f_z_dark$ (erg/s/cm$^2$)$\\times 10^8$} & \colhead{m$_z_dark$}}" + r"\tablehead{\colhead{Filter} & \colhead{$r_{12/z}$} & " + r"\colhead{$a$ (1/radians)} & \colhead{$b$ (1/airmass)} & " + r"\colhead{$c$ (az term/airmass)} & " + r"\colhead{$f_z_dark$ (erg/s/cm$^2$)$\times 10^8$} & " + r"\colhead{m$_z_dark$}}" ) for key in self.fit_results: numbers = "" @@ -565,6 +573,21 @@ def interp_mag( filter_names=["u", "g", "r", "i", "z", "y"], ): """ + Parameters + ---------- + interp_points : `np.ndarray`, (N, 3) + Interpolation points. Should contain sunAlt, airmass and azRelSun. + max_am : `float`, optional + Maximum airmass to calculate twilight sky to. + limits : `np.ndarray`, (N,), optional + Sun altitude limits + + Returns + ------- + spectra, wavelength : `np.ndarray`, (N, 3), `np.ndarray`, (M,) + + Notes + ----- Originally fit the twilight with a cutoff of sun altitude of -11 degrees. I think it can be safely extrapolated farther, but be warned you may be entering a regime where it breaks down. @@ -596,7 +619,24 @@ def interp_mag( def interp_spec(self, interp_points, max_am=3.0, limits=(np.radians(15.0), np.radians(-20.0))): """ - interp_points should have airmass, azRelSun, and sunAlt. + Parameters + ---------- + interp_points : `np.ndarray`, (N, 3) + Interpolation points. Should contain sunAlt, airmass and azRelSun. + max_am : `float`, optional + Maximum airmass to calculate twilight sky to. + limits : `np.ndarray`, (N,), optional + Sun altitude limits + + Returns + ------- + spectra, wavelength : `np.ndarray`, (N, 3), `np.ndarray`, (M,) + + Notes + ----- + Originally fit the twilight with a cutoff of sun altitude of + -11 degrees. I think it can be safely extrapolated farther, + but be warned you may be entering a regime where it breaks down.
""" npts = np.size(self.solar_wave) @@ -679,12 +719,12 @@ def _weighting(self, interp_points, values): hweights[:, good] = hweights[:, good] / norm[good] # Find the neighboring moonAltitude points in the grid - right_m_as, left_m_as, ma_right_w, ma_left_w = self.indx_and_weights( + right_m_as, left_m_as, ma_right_w, ma_left_w = self._indx_and_weights( interp_points["moonAltitude"], self.dim_dict["moonAltitude"] ) # Find the neighboring moonSunSep points in the grid - right_mss, left_mss, mss_right_w, mss_left_w = self.indx_and_weights( + right_mss, left_mss, mss_right_w, mss_left_w = self._indx_and_weights( interp_points["moonSunSep"], self.dim_dict["moonSunSep"] ) @@ -743,7 +783,7 @@ def _weighting(self, interp_points, values): good = np.where(norm != 0.0)[0] hweights[:, good] = hweights[:, good] / norm[good] - am_right_index, am_left_index, am_right_w, am_left_w = self.indx_and_weights( + am_right_index, am_left_index, am_right_w, am_left_w = self._indx_and_weights( use_points["airmass"], self.dim_dict["airmass"] ) diff --git a/rubin_sim/skybrightness/sky_model.py b/rubin_sim/skybrightness/sky_model.py index 403517ca5..b0518f591 100644 --- a/rubin_sim/skybrightness/sky_model.py +++ b/rubin_sim/skybrightness/sky_model.py @@ -78,43 +78,49 @@ def __init__( precise_alt_az=False, airmass_limit=3.0, ): - """ - Instatiate the SkyModel. This loads all the required template spectra/magnitudes - that will be used for interpolation. + """A model of the sky, including all of the required + template spectra or magnitudes needed to interpolate the + sky spectrum or magnitudes during twilight or night time + at any point on the sky. + Parameters ---------- - Observatory : Site object - object with attributes lat, lon, elev. But default loads LSST. - - twilight : bool (True) + observatory : `rubin_scheduler.site_models.Site`, optional + Default of None loads the LSST site. + twilight : `bool`, optional Include twilight component (True) - zodiacal : bool (True) + zodiacal : `bool`, optional Include zodiacal light component (True) - moon : bool (True) + moon : `bool`, optional Include scattered moonlight component (True) - airglow : bool (True) + airglow : `bool`, optional Include airglow component - lower_atm : bool (False) - Include lower atmosphere component. This component is part of `merged_spec`. - upper_atm : bool (False) - Include upper atmosphere component. This component is part of `merged_spec`. - scattered_star : bool (False) - Include scattered starlight component. This component is part of `merged_spec`. - merged_spec : bool (True) - Compute the lower_atm, upper_atm, and scattered_star simultaneously since they are all - functions of only airmass. - mags : bool (False) - By default, the sky model computes a 17,001 element spectrum. If `mags` is True, + lower_atm : `bool`, optional + Include lower atmosphere component. + This component is part of `merged_spec`. + upper_atm : `bool`, optional + Include upper atmosphere component. + This component is part of `merged_spec`. + scattered_star : `bool`, optional + Include scattered starlight component. + This component is part of `merged_spec`. + merged_spec : `bool`, optional + Compute the lower_atm, upper_atm, and scattered_star + simultaneously since they are all functions of only airmass. + mags : `bool`, optional + By default, the sky model computes a 17,001 element spectrum. + If `mags` is True, the model will return the LSST ugrizy magnitudes (in that order). 
- precise_alt_az : bool (False) + precise_alt_az : `bool`, optional If False, use the fast alt, az to ra, dec coordinate - transformations that do not take abberation, diffraction, etc + transformations that do not take aberration, diffraction, etc into account. Results in errors up to ~1.5 degrees, - but an order of magnitude faster than coordinate transforms in sims_utils. + but an order of magnitude faster than the precise coordinate + transformations available in rubin_scheduler.utils. - airmass_limit : float (3.0) - Most of the models are only accurate to airmass 3.0. If set higher, airmass values - higher than 3.0 are set to 3.0. + airmass_limit : `float`, optional + Most of the models are only accurate to airmass 3.0. + If set higher, airmass values higher than 3.0 are set to 3.0. """ self.moon = moon @@ -219,17 +225,28 @@ def set_ra_dec_mjd( filter_names=["u", "g", "r", "i", "z", "y"], ): """ - Set the sky parameters by computing the sky conditions on a given MJD and sky location. + Set the sky parameters by computing the sky conditions on a + given MJD and sky location. - - lon: Longitude-like (RA or Azimuth). Can be single number, list, or numpy array - lat: Latitude-like (Dec or Altitude) - mjd: Modified Julian Date for the calculation. Must be single number. - degrees: (False) Assumes lon and lat are radians unless degrees=True - az_alt: (False) Assume lon, lat are RA, Dec unless az_alt=True - solar_flux: solar flux in SFU Between 50 and 310. Default=130. 1 SFU=10^4 Jy. - filter_names: list of fitlers to return magnitudes for (if initialized with mags=True). + Parameters + ---------- + lon : `float` or `np.ndarray`, (N,) + Longitude-like (RA or Azimuth). + Can be single number, list, or numpy array + lat : `float` or `np.ndarray`, (N,) + Latitude-like (Dec or Altitude) + mjd : `float` + Modified Julian Date for the calculation. Must be single number. + degrees : `bool`, optional + If True, lon/lat are in degrees. If False, lon/lat in radians. + az_alt : `bool`, optional + Assume lon, lat are RA, Dec unless az_alt=True + solar_flux : `float` + Solar flux in SFU, between 50 and 310. Default=130. 1 SFU=10^4 Jy. + filter_names : `list` [`str`] + List of filters for which to return magnitudes + (if initialized with mags=True).
Returns ------- - out : dict - Dictionary of all the intermediate calculated values that may be of use outside - (the key:values in the output dict) - ra : numpy.array + out : `dict` + Dictionary of all the intermediate calculated values that may + be of use outside (the key:values in the output dict) + ra : `np.ndarray`, (N,) RA of the interpolation points (radians) - dec : np.array + dec : `np.ndarray`, (N,) Dec of the interpolation points (radians) - alts : np.array + alts : `np.ndarray`, (N,) Altitude (radians) - azs : np.array + azs : `np.ndarray`, (N,) Azimuth of interpolation points (radians) - airmass : np.array - Airmass values for each point, computed via 1./np.cos(np.pi/2.-self.alts). - solar_flux : float + airmass : `np.ndarray`, (N,) + Airmass values for each point, + computed via 1./np.cos(np.pi/2.-self.alts). + solar_flux : `float` The solar flux used (SFU). - sunAz : float + sunAz : `float` Azimuth of the sun (radians) - sunAlt : float + sunAlt : `float` Altitude of the sun (radians) - sunRA : float + sunRA : `float` RA of the sun (radians) - sunDec : float + sunDec : `float` Dec of the sun (radians) - azRelSun : np.array - Azimuth of each point relative to the sun (0=same direction as sun) (radians) - moonAz : float + azRelSun : `np.ndarray`, (N,) + Azimuth of each point relative to the sun + (0=same direction as sun) (radians) + moonAz : `float` Azimuth of the moon (radians) - moonAlt : float + moonAlt : `float` Altitude of the moon (radians) - moonRA : float + moonRA : `float` RA of the moon (radians) - moonDec : float + moonDec : `float` Dec of the moon (radians). Note, if you want distances - moon_phase : float + moon_phase : `float` Phase of the moon (0-100) - moonSunSep : float + moonSunSep : `float` Seperation of moon and sun (radians) - azRelMoon : np.array + azRelMoon : `np.ndarray`, (N,) Azimuth of each point relative to teh moon - eclipLon : np.array + eclipLon : `np.ndarray`, (N,) Ecliptic longitude (radians) of each point - eclipLat : np.array + eclipLat : `np.ndarray`, (N,) Ecliptic latitude (radians) of each point - sunEclipLon: np.array - Ecliptic longitude (radians) of each point with the sun at longitude zero - - Note that since the alt and az can be calculated using the fast approximation, if one wants - to compute the distance between the the points and the sun or moon, it is probably better to - use the ra,dec positions rather than the alt,az positions. + sunEclipLon: `np.ndarray`, (N,) + Ecliptic longitude (radians) of each point with the sun at + longitude zero + + Note that since the alt and az can be calculated using the fast + approximation, if one wants to compute the distance between the points + and the sun or moon, it is probably better to use the ra,dec positions + rather than the alt,az positions. """ result = {} @@ -555,9 +579,9 @@ def set_params( ): """ Set parameters manually. - Note, you can put in unphysical combinations of Parameters if you want to - (e.g., put a full moon at zenith at sunset). - if the alts kwarg is set it will override the airmass kwarg. + Note, you can put in unphysical combinations of Parameters if you + want to (e.g., put a full moon at zenith at sunset). + If the alts kwarg is set it will override the airmass kwarg. MoonPhase is percent of moon illuminated (0-100) """ @@ -680,7 +704,8 @@ def return_wave_spec(self): """ if self.azs is None: raise ValueError( - "No coordinates set. Use set_ra_dec_mjd, setRaDecAltAzMjd, or setParams methods before calling returnWaveSpec." + "No coordinates set. 
Use set_ra_dec_mjd, set_ra_dec_alt_az_mjd, or " + "set_params methods before calling return_wave_spec." ) if self.mags: raise ValueError("SkyModel set to interpolate magnitudes. Initialize object with mags=False") @@ -689,26 +714,30 @@ def return_wave_spec(self): return self.wave.copy(), self.spec.copy() def return_mags(self, bandpasses=None): - """ - Convert the computed spectra to a magnitude using the supplied bandpass, - or, if self.mags=True, return the mags in the LSST filters + """Return the skybrightness in magnitudes. + + Convert the computed spectra to a magnitude using the + supplied bandpass, or, if self.mags=True, return the mags in the + LSST filters. Parameters ---------- - bandpasses : dict (None) - Dictionary with bandpass name as keys and rubin_su=im.phot_utils.Bandpass objects as values. + bandpasses : `dict` [`str`, `rubin_sim.phot_utils.Bandpass`], optional + Dictionary with bandpass name as keys and `Bandpass` objects + as values. - If mags=True when initialized, return mags returns an structured array with - dtype names u,g,r,i,z,y. + If mags=True when initialized, return_mags returns a structured array + with dtype names u,g,r,i,z,y; the default LSST bandpasses are used. Returns ------- - mags : np.array + mags : `np.ndarray`, (N,) Sky brightness in AB mags/sq arcsec """ if self.azs is None: raise ValueError( - "No coordinates set. Use set_ra_dec_mjd, setRaDecAltAzMjd, or setParams methods before calling return_mags." + "No coordinates set. Use set_ra_dec_mjd, set_ra_dec_alt_az_mjd, or " + "set_params methods before calling return_mags." ) if self.mags: @@ -731,7 +760,8 @@ def return_mags(self, bandpasses=None): max_wave = bandpasses[key].wavelen[is_through].max() in_band = np.where((self.wave >= min_wave) & (self.wave <= max_wave)) for i, ra in enumerate(self.ra): - # Check that there is flux in the band, otherwise calc_mag fails + # Check that there is flux in the band, + # otherwise calc_mag fails if np.max(self.spec[i, in_band]) > 0: temp_sed.set_sed(self.wave, flambda=self.spec[i, :]) mags[i] = temp_sed.calc_mag(bandpasses[key]) diff --git a/rubin_sim/skybrightness/twilight_func.py b/rubin_sim/skybrightness/twilight_func.py index 18fe8a10e..917d64e00 100644 --- a/rubin_sim/skybrightness/twilight_func.py +++ b/rubin_sim/skybrightness/twilight_func.py @@ -29,16 +29,18 @@ def twilight_func(xdata, *args, amCut=1.0): az should be relative to the sun (i.e., sun is at az zero. based on what I've seen, here's my guess for how to fit the twilight: - args[0] = ratio of (zenith twilight flux at sun_alt = -12) and dark sky zenith flux + args[0] = ratio of (zenith twilight flux at sun_alt = -12) and dark sky + zenith flux args[1] = decay slope for all pixels (mags/radian) - args[2] = airmass term for hemisphere away from the sun. (factor to multiply max brightness at zenith by) + args[2] = airmass term for hemisphere away from the sun. + (factor to multiply max brightness at zenith by) args[3] = az term for hemisphere towards sun args[4] = zenith dark sky flux - args[5:] = zenith dark sky times constant (optionall) + args[5:] = zenith dark sky times constant (optional) amCut : float (1.0) - The airmass cut to apply to use only the away from sun fit. Was set to 1.1 - previously for not very clear reasons. + The airmass cut to apply to use only the away from sun fit. + Was set to 1.1 previously for not very clear reasons.
""" @@ -54,7 +56,8 @@ def twilight_func(xdata, *args, amCut=1.0): flux[towards] *= 10.0 ** (args[3] * np.cos(az[towards]) * (airmass[towards] - 1.0)) # This let's one fit the dark sky background simultaneously. - # It assumes the dark sky is a function of airmass only. Forced to be args[4] at zenith. + # It assumes the dark sky is a function of airmass only. + # Forced to be args[4] at zenith. if np.size(args) >= 6: flux[away] += args[4] * np.exp(args[5:][xdata["hpid"][away]] * (airmass[away] - 1.0)) flux[towards] += args[4] * np.exp(args[5:][xdata["hpid"][towards]] * (airmass[towards] - 1.0)) @@ -64,11 +67,14 @@ def twilight_func(xdata, *args, amCut=1.0): def zenith_twilight(alpha, *args): """ - The flux at zenith as a linear combination of a twilight component and a constant: + The flux at zenith as a linear combination of a twilight component + and a constant: alpha = sun altitude (radians) - args[0] = ratio of (zenith twilight flux at sunAlt = -12) and dark sky zenith flux + args[0] = ratio of (zenith twilight flux at sunAlt = -12) and + dark sky zenith flux args[1] = decay slope for all pixels (mags/radian) - args[2] = airmass term for hemisphere away from the sun. (factor to multiply max brightness at zenith by) + args[2] = airmass term for hemisphere away from the sun. + (factor to multiply max brightness at zenith by) args[3] = az term for hemisphere towards sun args[4] = zenith dark sky flux """ diff --git a/rubin_sim/skybrightness/utils.py b/rubin_sim/skybrightness/utils.py index 4dc81ae2a..dc57de5e1 100644 --- a/rubin_sim/skybrightness/utils.py +++ b/rubin_sim/skybrightness/utils.py @@ -57,7 +57,7 @@ def spec2mags(spectra_list, wave): for j, filtName in enumerate(keys): try: result["mags"][i][j] = tempSed.calc_mag(filters[filtName]) - except: + except ValueError: pass return result, filterwave @@ -66,8 +66,8 @@ def recalc_mags(data_dir=None): """Recalculate the magnitudes for sky brightness components. DANGER: Overwrites data files in place. The rubin_sim_data/skybrightness - folder will need to be packaged and updated after running this to propigate - changes ot rest of users. + folder will need to be packaged and updated after running this to propagate + changes to other users. 
""" dirs = ["Airglow", "MergedSpec", "ScatteredStarLight", "Zodiacal", "LowerAtm", "Moon", "UpperAtm"] From 4efd4a05943d353b35c5632946122f74ccb246bf Mon Sep 17 00:00:00 2001 From: Lynne Jones Date: Sat, 13 Jan 2024 20:35:59 -0800 Subject: [PATCH 08/26] Change Exception to ValueError --- rubin_sim/phot_utils/sed.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rubin_sim/phot_utils/sed.py b/rubin_sim/phot_utils/sed.py index 65fb401ff..ce5f0fde0 100644 --- a/rubin_sim/phot_utils/sed.py +++ b/rubin_sim/phot_utils/sed.py @@ -1340,7 +1340,7 @@ def calc_mag(self, bandpass, wavelen=None, fnu=None): """ flux = self.calc_flux(bandpass, wavelen=wavelen, fnu=fnu) if flux < 1e-300: - raise Exception("This SED has no flux within this bandpass.") + raise ValueError("This SED has no flux within this bandpass.") mag = self.mag_from_flux(flux) return mag From 8da8cdd7aa9b33dcb221a496873f9af0305e9d2e Mon Sep 17 00:00:00 2001 From: Lynne Jones Date: Sun, 14 Jan 2024 04:06:08 -0800 Subject: [PATCH 09/26] Ruff and black Doc typos ruff and doc strings maf --- docs/maf-api-metricbundles.rst | 2 +- docs/maf-api.rst | 1 + docs/satellite-constellations.rst | 6 +- rubin_sim/data/__init__.py | 2 +- rubin_sim/maf/metrics/simple_metrics.py | 146 ++++++++++++------ rubin_sim/maf/metrics/summary_metrics.py | 6 +- rubin_sim/maf/plots/spatial_plotters.py | 11 +- rubin_sim/maf/slicers/base_spatial_slicer.py | 4 +- rubin_sim/maf/slicers/user_points_slicer.py | 30 ++-- rubin_sim/moving_objects/orbits.py | 1 - rubin_sim/moving_objects/pre_generate.py | 3 +- rubin_sim/phot_utils/signaltonoise.py | 5 +- rubin_sim/scheduler/__init__.py | 2 +- .../data/ESO_Spectra/eso_tools.py | 43 +++--- .../skybrightness/data/solarSpec/package.py | 5 +- rubin_sim/utils/__init__.py | 2 +- 16 files changed, 168 insertions(+), 101 deletions(-) diff --git a/docs/maf-api-metricbundles.rst b/docs/maf-api-metricbundles.rst index d251e51cd..3e0eaab1c 100644 --- a/docs/maf-api-metricbundles.rst +++ b/docs/maf-api-metricbundles.rst @@ -6,7 +6,7 @@ Metric Bundles ============== -.. automodule:: rubin_sim.maf.metricbundles +.. automodule:: rubin_sim.maf.metric_bundles :imported-members: :members: :show-inheritance: diff --git a/docs/maf-api.rst b/docs/maf-api.rst index df8a3aa5d..40afc2297 100644 --- a/docs/maf-api.rst +++ b/docs/maf-api.rst @@ -18,4 +18,5 @@ MAF API Plots Run Comparison Slicers + Stackers Utils \ No newline at end of file diff --git a/docs/satellite-constellations.rst b/docs/satellite-constellations.rst index 32aa35168..815f41d6d 100644 --- a/docs/satellite-constellations.rst +++ b/docs/satellite-constellations.rst @@ -3,11 +3,11 @@ .. _satellite-constellations: ######################## -Satelilte Constellations +Satellite Constellations ######################## The ``rubin_sim.satellite_constellations`` module contains -tools for creating and propgating satellite mega constellations -to evaluate how they could streak Rubin images. +tools for creating and propagating satellite mega constellations +to evaluate their impact (in terms of streaks) in Rubin images. There is also an extension for ``rubin_scheduler`` that will add "satellite dodging" to the scheduler logic, at a cost of overall image depth. 
diff --git a/rubin_sim/data/__init__.py b/rubin_sim/data/__init__.py index 7361d80cd..f5af8227a 100644 --- a/rubin_sim/data/__init__.py +++ b/rubin_sim/data/__init__.py @@ -1 +1 @@ -from .rs_download_data import * #noqa: F403 +from .rs_download_data import * # noqa: F403 diff --git a/rubin_sim/maf/metrics/simple_metrics.py b/rubin_sim/maf/metrics/simple_metrics.py index 36bd6bdbe..c861514cc 100644 --- a/rubin_sim/maf/metrics/simple_metrics.py +++ b/rubin_sim/maf/metrics/simple_metrics.py @@ -38,14 +38,18 @@ from .base_metric import BaseMetric -# A collection of commonly used simple metrics, operating on a single column and returning a float. +# A collection of commonly used simple metrics, +# operating on a single column and returning a float. twopi = 2.0 * np.pi class PassMetric(BaseMetric): - """ - Just pass the entire array through + """Pass the entire dataslice array back to the MetricBundle. + + This is most likely useful while prototyping metrics and wanting to + just 'get the data at a point in the sky', while using a HealpixSlicer + or a UserPointSlicer. """ def __init__(self, cols=None, **kwargs): @@ -58,64 +62,86 @@ def run(self, data_slice, slice_point=None): class Coaddm5Metric(BaseMetric): - """Calculate the coadded m5 value at this gridpoint. + """Calculate the coadded m5 value. Parameters ---------- - m5Col : `str`, optional + m5_col : `str`, optional Name of the m5 column. Default fiveSigmaDepth. metric_name : `str`, optional - Name to associate with the metric output. + Name to associate with the metric output. Default "CoaddM5". + filter_name : `str`, optional + Optionally specify a filter to sub-select visits. + Default None, does no sub-selection or checking. + filter_col : `str`, optional + Name of the filter column. """ - def __init__(self, m5_col="fiveSigmaDepth", metric_name="CoaddM5", **kwargs): + def __init__(self, m5_col="fiveSigmaDepth", metric_name="CoaddM5", + filter_name=None, filter_col="Filter", **kwargs): + self.filter_name = filter_name + self.filter_col = filter_col + self.m5_col = m5_col super(Coaddm5Metric, self).__init__(col=m5_col, metric_name=metric_name, **kwargs) + @staticmethod + def coadd(single_visit_m5s): + return 1.25 * np.log10(np.sum(10.0 ** (0.8 * single_visit_m5s))) + def run(self, data_slice, slice_point=None): - # Running this metric directly from the slicer, this should never come up. - # However, other metrics call this one and maybe had visits in other filters .. if len(data_slice) == 0: return self.badval - return 1.25 * np.log10(np.sum(10.0 ** (0.8 * data_slice[self.colname]))) + if self.filter_name is not None: + matched = np.where(data_slice[self.filter_col] == self.filter_name) + coadd = self.coadd(data_slice[matched][self.m5_col]) + else: + coadd = self.coadd(data_slice[self.m5_col]) + return coadd class MaxMetric(BaseMetric): - """Calculate the maximum of a simData column slice.""" + """Calculate the maximum of a simData column slice. + """ def run(self, data_slice, slice_point=None): return np.max(data_slice[self.colname]) class AbsMaxMetric(BaseMetric): - """Calculate the max of the absolute value of a simData column slice.""" + """Calculate the max of the absolute value of a simData column slice. + """ def run(self, data_slice, slice_point=None): return np.max(np.abs(data_slice[self.colname])) class MeanMetric(BaseMetric): - """Calculate the mean of a simData column slice.""" + """Calculate the mean of a simData column slice. 
+ """ def run(self, data_slice, slice_point=None): return np.mean(data_slice[self.colname]) class AbsMeanMetric(BaseMetric): - """Calculate the mean of the absolute value of a simData column slice.""" + """Calculate the mean of the absolute value of a simData column slice. + """ def run(self, data_slice, slice_point=None): return np.mean(np.abs(data_slice[self.colname])) class MedianMetric(BaseMetric): - """Calculate the median of a simData column slice.""" + """Calculate the median of a simData column slice. + """ def run(self, data_slice, slice_point=None): return np.median(data_slice[self.colname]) class AbsMedianMetric(BaseMetric): - """Calculate the median of the absolute value of a simData column slice.""" + """Calculate the median of the absolute value of a simData column slice. + """ def run(self, data_slice, slice_point=None): return np.median(np.abs(data_slice[self.colname])) @@ -164,7 +190,8 @@ def run(self, data_slice, slice_point=None): class UniqueRatioMetric(BaseMetric): - """Return the number of unique values divided by the total number of values.""" + """Return the number of unique values divided by the + total number of values.""" def run(self, data_slice, slice_point=None): ntot = float(np.size(data_slice[self.colname])) @@ -184,7 +211,8 @@ def run(self, data_slice, slice_point=None): class CountExplimMetric(BaseMetric): - """Count the number of x second visits. Useful for rejecting very short exposures + """Count the number of x second visits. + Useful for rejecting very short exposures and counting 60s exposures as 2 visits.""" def __init__(self, col=None, min_exp=20.0, expected_exp=30.0, exp_col="visitExposureTime", **kwargs): @@ -202,7 +230,7 @@ def run(self, data_slice, slice_point=None): class CountRatioMetric(BaseMetric): - """Count the length of a simData column slice, then divide by 'norm_val'.""" + """Count the length of a column slice, then divide by `norm_val`.""" def __init__(self, col=None, norm_val=1.0, metric_name=None, units="", **kwargs): self.norm_val = float(norm_val) @@ -215,7 +243,7 @@ def run(self, data_slice, slice_point=None): class CountSubsetMetric(BaseMetric): - """Count the length of a simData column slice which matches 'subset'.""" + """Count the length of a column slice which matches `subset`.""" def __init__(self, col=None, subset=None, units="#", **kwargs): super(CountSubsetMetric, self).__init__(col=col, units=units, **kwargs) @@ -229,7 +257,8 @@ def run(self, data_slice, slice_point=None): class CountBeyondThreshold(BaseMetric): - """Count the number of entries in a data column above or below the threshold.""" + """Count the number of entries in a data column above or below + the `threshold`.""" def __init__(self, col=None, lower_threshold=None, upper_threshold=None, **kwargs): super().__init__(col=col, **kwargs) @@ -256,7 +285,7 @@ def run(self, data_slice, slice_point=None): class RobustRmsMetric(BaseMetric): """Use the inter-quartile range of the data to estimate the RMS. - Robust since this calculation does not include outliers in the distribution. + Robust, as this calculation does not include outliers in the distribution. """ def run(self, data_slice, slice_point=None): @@ -266,7 +295,8 @@ def run(self, data_slice, slice_point=None): class MaxPercentMetric(BaseMetric): - """Return the percent of the data which has the maximum value.""" + """Return the percent of data which matches the maximum value of the data. 
+ """ def run(self, data_slice, slice_point=None): n_max = np.size(np.where(data_slice[self.colname] == np.max(data_slice[self.colname]))[0]) @@ -275,7 +305,9 @@ def run(self, data_slice, slice_point=None): class AbsMaxPercentMetric(BaseMetric): - """Return the percent of the data which has the absolute value of the max value of the data.""" + """Return the percent of data which matches the absolute value of the + max value of the data. + """ def run(self, data_slice, slice_point=None): max_val = np.abs(np.max(data_slice[self.colname])) @@ -285,7 +317,8 @@ def run(self, data_slice, slice_point=None): class BinaryMetric(BaseMetric): - """Return 1 if there is data.""" + """Return 1 if there is data, `badval` otherwise. + """ def run(self, data_slice, slice_point=None): if data_slice.size > 0: @@ -295,7 +328,8 @@ def run(self, data_slice, slice_point=None): class FracAboveMetric(BaseMetric): - """Find the fraction of data values above a given value.""" + """Find the fraction of data values above a given `cutoff`. + """ def __init__(self, col=None, cutoff=0.5, scale=1, metric_name=None, **kwargs): # Col could just get passed in bundle with kwargs, but by explicitly pulling it out @@ -314,7 +348,8 @@ def run(self, data_slice, slice_point=None): class FracBelowMetric(BaseMetric): - """Find the fraction of data values below a given value.""" + """Find the fraction of data values below a given `cutoff`. + """ def __init__(self, col=None, cutoff=0.5, scale=1, metric_name=None, **kwargs): if metric_name is None: @@ -331,7 +366,8 @@ def run(self, data_slice, slice_point=None): class PercentileMetric(BaseMetric): - """Find the value of a column at a given percentile.""" + """Find the value of a column at a given `percentile`. + """ def __init__(self, col=None, percentile=90, metric_name=None, **kwargs): if metric_name is None: @@ -345,8 +381,8 @@ def run(self, data_slice, slice_point=None): class NoutliersNsigmaMetric(BaseMetric): - """Calculate the # of visits less than n_sigma below the mean (n_sigma<0) or - more than n_sigma above the mean of 'col'. + """Calculate the # of visits less than n_sigma below the mean (n_sigma<0) + or more than n_sigma above the mean. """ def __init__(self, col=None, n_sigma=3.0, metric_name=None, **kwargs): @@ -374,9 +410,10 @@ def _rotate_angles(angles): """Private utility for the '*Angle' Metrics below. This takes a series of angles between 0-2pi and rotates them so that the - first angle is at 0, ensuring the biggest 'gap' is at the end of the series. - This simplifies calculations like the 'mean' and 'rms' or 'fullrange', removing - the discontinuity at 0/2pi. + first angle is at 0, ensuring the biggest 'gap' is at the end of the + series. + This simplifies calculations like the 'mean' and 'rms' or 'fullrange', + removing the discontinuity at 0/2pi. """ angleidx = np.argsort(angles) diffangles = np.diff(angles[angleidx]) @@ -395,14 +432,15 @@ def _rotate_angles(angles): class MeanAngleMetric(BaseMetric): - """Calculate the mean of an angular (degree) simData column slice. + """Calculate the mean of an angular (degree) column slice. 'MeanAngle' differs from 'Mean' in that it accounts for wraparound at 2pi. """ def run(self, data_slice, slice_point=None): """Calculate mean angle via unit vectors. - If unit vector 'strength' is less than 0.1, then just set mean to 180 degrees + If unit vector 'strength' is less than 0.1, + then just set mean to 180 degrees (as this indicates nearly uniformly distributed angles). 
""" x = np.cos(np.radians(data_slice[self.colname])) @@ -418,7 +456,7 @@ def run(self, data_slice, slice_point=None): class RmsAngleMetric(BaseMetric): - """Calculate the standard deviation of an angular (degrees) simData column slice. + """Calculate the standard deviation of an angular (degrees) column slice. 'RmsAngle' differs from 'Rms' in that it accounts for wraparound at 2pi. """ @@ -429,9 +467,10 @@ def run(self, data_slice, slice_point=None): class FullRangeAngleMetric(BaseMetric): - """Calculate the full range of an angular (degrees) simData column slice. + """Calculate the full range of an angular (degrees) column slice. - 'FullRangeAngle' differs from 'FullRange' in that it accounts for wraparound at 2pi. + 'FullRangeAngle' differs from 'FullRange' in that it accounts for + wraparound at 2pi. """ def run(self, data_slice, slice_point=None): @@ -440,24 +479,30 @@ def run(self, data_slice, slice_point=None): class AngularSpreadMetric(BaseMetric): - """Compute the angular spread statistic which measures uniformity of a distribution angles - accounting for 2pi periodicity. - - The strategy is to first map angles into unit vectors on the unit circle, and then compute the - 2D centroid of those vectors. A uniform distribution of angles will lead to a distribution of - unit vectors with mean that approaches the origin. In contrast, a delta function distribution - of angles leads to a delta function distribution of unit vectors with a mean that lands on the + """Compute the angular spread statistic which measures + uniformity of a distribution angles accounting for 2pi periodicity. + + The strategy is to first map angles into unit vectors on the unit circle, + and then compute the 2D centroid of those vectors. + A uniform distribution of angles will lead to a distribution of + unit vectors with mean that approaches the origin. + In contrast, a delta function distribution of angles leads to a + delta function distribution of unit vectors with a mean that lands on the unit circle. - The angular spread statistic is then defined as 1 - R, where R is the radial offset of the mean - of the unit vectors derived from the input angles. R approaches 1 for a uniform distribution + The angular spread statistic is then defined as 1 - R, + where R is the radial offset of the mean + of the unit vectors derived from the input angles. + R approaches 1 for a uniform distribution of angles and 0 for a delta function distribution of angles. - The optional parameter `period` may be used to specificy periodicity other than 2 pi. + The optional parameter `period` may be used to specificy periodicity + other than 2 pi. """ def __init__(self, col=None, period=2.0 * np.pi, **kwargs): - # https://en.wikipedia.org/wiki/Directional_statistics#Measures_of_location_and_spread + # https://en.wikipedia.org/wiki/Directional_statistics + # #Measures_of_location_and_spread # jmeyers314@gmail.com self.period = period super(AngularSpreadMetric, self).__init__(col=col, **kwargs) @@ -474,7 +519,8 @@ def run(self, data_slice, slice_point=None): class RealMeanMetric(BaseMetric): - """Calculate the mean of a simData column slice with no nans or infs.""" + """Calculate the mean of a column with no nans or infs. 
+    """

     def run(self, data_slice, slice_point=None):
         return np.mean(data_slice[self.colname][np.isfinite(data_slice[self.colname])])
diff --git a/rubin_sim/maf/metrics/summary_metrics.py b/rubin_sim/maf/metrics/summary_metrics.py
index 31cb3aea6..eac413698 100644
--- a/rubin_sim/maf/metrics/summary_metrics.py
+++ b/rubin_sim/maf/metrics/summary_metrics.py
@@ -157,8 +157,10 @@ def run(self, data_slice, slice_point=None):


 class IdentityMetric(BaseMetric):
-    """
-    Return the metric value itself .. this is primarily useful as a summary statistic for UniSlicer metrics.
+    """Return the metric value.
+
+    This is primarily useful as a summary statistic for UniSlicer metrics,
+    to propagate the ~MetricBundle.metric_value into the results database.
     """

     def run(self, data_slice, slice_point=None):
diff --git a/rubin_sim/maf/plots/spatial_plotters.py b/rubin_sim/maf/plots/spatial_plotters.py
index 5788fe014..b0f15ef8d 100644
--- a/rubin_sim/maf/plots/spatial_plotters.py
+++ b/rubin_sim/maf/plots/spatial_plotters.py
@@ -50,6 +50,7 @@
     "n_ticks": 10,
     "color_min": None,
     "color_max": None,
+    "extend": "neither",
     "x_min": None,
     "x_max": None,
     "y_min": None,
@@ -263,11 +264,16 @@ def __call__(self, metric_value_in, slicer, user_plot_dict, fignum=None):
             warnings.simplefilter("ignore")
             # The vertical colorbar is primarily aimed at the movie
             # but may be useful for other purposes
+            if plot_dict["extend"] != "neither":
+                extendrect = False
+            else:
+                extendrect = True
             if plot_dict["cbar_orientation"].lower() == "vertical":
                 cb = plt.colorbar(
                     im,
                     shrink=0.5,
-                    extendrect=True,
+                    extendrect=extendrect,
+                    extend=plot_dict["extend"],
                     location="right",
                     format=plot_dict["cbar_format"],
                 )
@@ -280,7 +286,8 @@ def __call__(self, metric_value_in, slicer, user_plot_dict, fignum=None):
                 pad=0.1,
                 orientation="horizontal",
                 format=plot_dict["cbar_format"],
-                extendrect=True,
+                extendrect=extendrect,
+                extend=plot_dict["extend"],
             )
         cb.set_label(plot_dict["xlabel"], fontsize=plot_dict["fontsize"])
         if plot_dict["labelsize"] is not None:
diff --git a/rubin_sim/maf/slicers/base_spatial_slicer.py b/rubin_sim/maf/slicers/base_spatial_slicer.py
index 708910e2b..6416f9c06 100644
--- a/rubin_sim/maf/slicers/base_spatial_slicer.py
+++ b/rubin_sim/maf/slicers/base_spatial_slicer.py
@@ -41,7 +41,7 @@ class BaseSpatialSlicer(BaseSlicer):
     rot_sky_pos_col_name : `str`, optional
         Name of the rotSkyPos column in the input data.
         Only used if use_camera is True.
-        Describes the orientation of the camera orientation on the the sky.
+        Describes the orientation of the camera on the sky.
     lat_lon_deg : `bool`, optional
         Flag indicating whether lat and lon values from input data
         are in degrees (True) or radians (False).
@@ -53,7 +53,7 @@ class BaseSpatialSlicer(BaseSlicer):
         Leafsize value for kdtree.
     radius : `float`, optional
         Radius for matching in the kdtree.
-        Equivalent to the radius of the FOV, in degrees. 
+        Equivalent to the radius of the FOV, in degrees.
     use_camera : `bool`, optional
         Flag to indicate whether to use the LSST camera footprint or not.
     camera_footprint_file : `str`, optional
diff --git a/rubin_sim/maf/slicers/user_points_slicer.py b/rubin_sim/maf/slicers/user_points_slicer.py
index a466c9cbf..16807bbef 100644
--- a/rubin_sim/maf/slicers/user_points_slicer.py
+++ b/rubin_sim/maf/slicers/user_points_slicer.py
@@ -10,7 +10,9 @@


 class UserPointsSlicer(BaseSpatialSlicer):
-    """A spatial slicer that evaluates pointings overlapping user-provided list of points.
+    """A spatial slicer based on a user-provided list of points.
+ The data_slices returned are the visit pointings which overlap + each of these points. Parameters ---------- @@ -19,32 +21,37 @@ class UserPointsSlicer(BaseSpatialSlicer): dec : `list` or `numpy.ndarray` User-selected Dec points, in degrees. Stored internally in radians. lon_col : `str`, optional - Name of the longitude (RA equivalent) column to use from the input data. + Name of the longitude (RA equivalent) column in the input data. Default fieldRA lat_col : `str`, optional - Name of the latitude (Dec equivalent) column to use from the input data. + Name of the latitude (Dec equivalent) column in the input data. Default fieldDec latLonDeg : `bool`, optional - Flag indicating whether the lon and lat values will be in degrees (True) or radians (False). - Default True (appropriate for opsim v4). + Flag indicating whether the lon and lat values will be in + degrees (True) or radians (False). + Default True. verbose : `bool`, optional - Flag to indicate whether or not to write additional information to stdout during runtime. + Flag to indicate whether or not to write additional + information to stdout during runtime. Default True. badval : `float`, optional Bad value flag, relevant for plotting. Default -666. leafsize : `int`, optional Leafsize value for kdtree. Default 100. radius : `float`, optional - Radius for matching in the kdtree. Equivalent to the radius of the FOV. Degrees. + Radius for matching in the kdtree. + Equivalent to the radius of the FOV. Degrees. Default 2.45. use_camera : `bool`, optional Flag to indicate whether to use the LSST camera footprint or not. Default True. camera_footprint_file : `str`, optional - Name of the camera footprint map to use. Can be None, which will use the default. + Name of the camera footprint map to use. + Can be None, which will use the default. rotSkyPosColName : `str`, optional - Name of the rotSkyPos column in the input data. Only used if use_camera is True. - Describes the orientation of the camera orientation compared to the sky. + Name of the rotSkyPos column in the input data. + Only used if use_camera is True. + Describes the camera orientation compared to the sky. Default rotSkyPos. """ @@ -75,7 +82,8 @@ def __init__( camera_footprint_file=camera_footprint_file, rot_sky_pos_col_name=rot_sky_pos_col_name, ) - # check that ra and dec are iterable, if not, they are probably naked numbers, wrap in list + # check that ra and dec are iterable, + # if not, they are probably naked numbers, wrap in list if not hasattr(ra, "__iter__"): ra = [ra] if not hasattr(dec, "__iter__"): diff --git a/rubin_sim/moving_objects/orbits.py b/rubin_sim/moving_objects/orbits.py index 08c94682e..b1ce56561 100644 --- a/rubin_sim/moving_objects/orbits.py +++ b/rubin_sim/moving_objects/orbits.py @@ -215,7 +215,6 @@ def set_orbits(self, orbits): # All is good. self.orbits = orbits - @staticmethod def assign_sed(self, orbits, random_seed=None): """Assign either a C or S type SED, depending on the semi-major axis of the object. diff --git a/rubin_sim/moving_objects/pre_generate.py b/rubin_sim/moving_objects/pre_generate.py index fdacc95d7..4f72285ed 100644 --- a/rubin_sim/moving_objects/pre_generate.py +++ b/rubin_sim/moving_objects/pre_generate.py @@ -7,8 +7,7 @@ from rubin_sim.moving_objects import DirectObs, Orbits if __name__ == "__main__": - """Pre-generate a series of nightly ephemerides with a 1-night timestep. 
- """ + """Pre-generate a series of nightly ephemerides with a 1-night timestep.""" mjd_start = 60676.0 length = 365.25 * 12 # How long to pre-compute for dtime = 1 diff --git a/rubin_sim/phot_utils/signaltonoise.py b/rubin_sim/phot_utils/signaltonoise.py index 706f9a52d..fcaab2777 100644 --- a/rubin_sim/phot_utils/signaltonoise.py +++ b/rubin_sim/phot_utils/signaltonoise.py @@ -13,7 +13,7 @@ "mag_error_from_snr", "calc_mag_error_m5", "calc_mag_error_sed", - "scale_sky_m5" + "scale_sky_m5", ) import numpy @@ -656,6 +656,7 @@ def calc_astrometric_error(mag, m5, fwhm_geom=0.7, nvisit=1, systematic_floor=10 astrom_error = numpy.sqrt(error_sys * error_sys + error_rand * error_rand) return astrom_error + def scale_sky_m5(m5target, skysed, total_bandpass, hardware, phot_params, fwhm_eff=0.83): """ Take an SED representing the sky and normalize it so that @@ -691,4 +692,4 @@ def scale_sky_m5(m5target, skysed, total_bandpass, hardware, phot_params, fwhm_e ) sky_sed_out.multiply_flux_norm(sky_counts_target / sky_counts) - return sky_sed_out \ No newline at end of file + return sky_sed_out diff --git a/rubin_sim/scheduler/__init__.py b/rubin_sim/scheduler/__init__.py index 38f78be99..3a0ad0c5f 100644 --- a/rubin_sim/scheduler/__init__.py +++ b/rubin_sim/scheduler/__init__.py @@ -1,6 +1,6 @@ import warnings -from rubin_scheduler.scheduler import * #noqa: F403 +from rubin_scheduler.scheduler import * # noqa: F403 warnings.simplefilter("default") warnings.warn("rubin_sim.scheduler is deprecated, switch to rubin_scheduler.scheduler", DeprecationWarning) diff --git a/rubin_sim/skybrightness/data/ESO_Spectra/eso_tools.py b/rubin_sim/skybrightness/data/ESO_Spectra/eso_tools.py index 212c9a2a6..094f04834 100644 --- a/rubin_sim/skybrightness/data/ESO_Spectra/eso_tools.py +++ b/rubin_sim/skybrightness/data/ESO_Spectra/eso_tools.py @@ -4,14 +4,17 @@ import healpy as hp import numpy as np from astropy.io import fits -from lsst.sims.photUtils import Bandpass, Sed -from lsst.sims.utils import angular_separation +from rubin_scheduler.utils import angular_separation + +from rubin_sim.data import get_data_dir +from rubin_sim.phot_utils import Bandpass, Sed ## Tools for calling and reading things from the ESO sky model. -# Downloaded and installed from XXX. -# Installing is a major headache, let's hope we don't have to do this again anytime soon +# Installing 'calcskymodel' is a major headache, +# let's hope we don't have to do this again anytime soon -# Run this in the sm-01_mod2 direcory to regenerate the sims_skybrightness save files. +# Run this in the sm-01_mod2 directory to regenerate the sims_skybrightness +# save files. 
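# Usage sketch (hedged; this assumes the standard rubin_sim_data throughputs
# layout and that a "total_r.dat" file exists there): load one LSST baseline
# bandpass the way spec2mags below does, and get its effective wavelength.
import os

import numpy as np

from rubin_sim.data import get_data_dir
from rubin_sim.phot_utils import Bandpass


def load_baseline_bandpass(filtername="r"):
    # Read the two-column (wavelength, transmission) throughput curve.
    through_path = os.path.join(get_data_dir(), "throughputs", "baseline")
    bp_arr = np.loadtxt(
        os.path.join(through_path, f"total_{filtername}.dat"),
        dtype=list(zip(["wave", "trans"], [float] * 2)),
    )
    bandpass = Bandpass()
    bandpass.set_bandpass(bp_arr["wave"], bp_arr["trans"])
    # calc_eff_wavelen returns (eff wavelen of phi, eff wavelen of sb),
    # matching the [0] indexing used in spec2mags below.
    return bandpass, bandpass.calc_eff_wavelen()[0]
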
hPlank = 6.626068e-27 # erg s @@ -105,13 +108,12 @@ def call_calcskymodel(): def spec2mags(spectra_list, wave): # Load LSST filters - throughPath = os.getenv("LSST_THROUGHPUTS_BASELINE") + throughPath = os.path.join(get_data_dir(), "throughputs", "baseline") keys = ["u", "g", "r", "i", "z", "y"] dtype = [("mags", "float", (6))] result = np.zeros(len(spectra_list), dtype=dtype) - nfilt = len(keys) filters = {} for filtername in keys: bp = np.loadtxt( @@ -119,25 +121,25 @@ def spec2mags(spectra_list, wave): dtype=list(zip(["wave", "trans"], [float] * 2)), ) tempB = Bandpass() - tempB.setBandpass(bp["wave"], bp["trans"]) + tempB.set_bandpass(bp["wave"], bp["trans"]) filters[filtername] = tempB - filterwave = np.array([filters[f].calcEffWavelen()[0] for f in keys]) + filterwave = np.array([filters[f].calc_eff_wavelen()[0] for f in keys]) for i, spectrum in enumerate(spectra_list): tempSed = Sed() - tempSed.setSED(wave, flambda=spectrum) + tempSed.set_sed(wave, flambda=spectrum) for j, filtName in enumerate(keys): try: - result["mags"][i][j] = tempSed.calcMag(filters[filtName]) - except: + result["mags"][i][j] = tempSed.calc_mag(filters[filtName]) + except ValueError: pass return result, filterwave def generate_airglow(outDir=None): if outDir is None: - dataDir = os.getenv("SIMS_SKYBRIGHTNESS_DATA_DIR") + dataDir = os.path.join(get_data_dir(), "skybrightness") outDir = os.path.join(dataDir, "ESO_Spectra/Airglow") ams = np.array([1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0, 2.5, 3.0]) @@ -184,7 +186,7 @@ def generate_airglow(outDir=None): def generate_loweratm(outDir=None): if outDir is None: - dataDir = os.getenv("SIMS_SKYBRIGHTNESS_DATA_DIR") + dataDir = os.path.join(get_data_dir(), "skybrightness") outDir = os.path.join(dataDir, "ESO_Spectra/LowerAtm") ams = np.array([1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0, 2.5, 3.0]) @@ -230,10 +232,11 @@ def generate_loweratm(outDir=None): def merged_spec(): - dataDir = os.getenv("SIMS_SKYBRIGHTNESS_DATA_DIR") + dataDir = os.path.join(get_data_dir(), "skybrightness") outDir = os.path.join(dataDir, "ESO_Spectra/MergedSpec") - # A large number of the background components only depend on Airmass, so we can merge those together + # A large number of the background components only depend on Airmass, + # so we can merge those together npzs = [ "LowerAtm/Spectra.npz", @@ -266,7 +269,7 @@ def merged_spec(): def generate_moon(outDir=None): if outDir is None: - dataDir = os.getenv("SIMS_SKYBRIGHTNESS_DATA_DIR") + dataDir = os.path.join(get_data_dir(), "skybrightness") outDir = os.path.join(dataDir, "ESO_Spectra/Moon") nside = 4 @@ -342,7 +345,7 @@ def generate_moon(outDir=None): def generate_scatteredStar(outDir=None): if outDir is None: - dataDir = os.getenv("SIMS_SKYBRIGHTNESS_DATA_DIR") + dataDir = os.path.join(get_data_dir(), "skybrightness") outDir = os.path.join(dataDir, "ESO_Spectra/ScatteredStarLight") ams = np.array([1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0, 2.5, 3.0]) @@ -389,7 +392,7 @@ def generate_scatteredStar(outDir=None): def generate_upperatm(outDir=None): if outDir is None: - dataDir = os.getenv("SIMS_SKYBRIGHTNESS_DATA_DIR") + dataDir = os.path.join(get_data_dir(), "skybrightness") outDir = os.path.join(dataDir, "ESO_Spectra/UpperAtm") ams = np.array([1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0, 2.5, 3.0]) @@ -436,7 +439,7 @@ def generate_upperatm(outDir=None): def generate_zodi(outDir=None): if outDir is None: - dataDir = os.getenv("SIMS_SKYBRIGHTNESS_DATA_DIR") + dataDir = 
os.path.join(get_data_dir(), "skybrightness")
         outDir = os.path.join(dataDir, "ESO_Spectra/Zodiacal")

     ams = np.array([1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0, 2.5, 3.0])
diff --git a/rubin_sim/skybrightness/data/solarSpec/package.py b/rubin_sim/skybrightness/data/solarSpec/package.py
index 0d168506e..15b7c62e1 100644
--- a/rubin_sim/skybrightness/data/solarSpec/package.py
+++ b/rubin_sim/skybrightness/data/solarSpec/package.py
@@ -9,12 +9,13 @@
     os.path.join(dataDir, "solarSpec/solarSpec.dat"),
     dtype=list(zip(["microns", "Irr"], [float] * 2)),
 )
-# data['Irr'] = data['Irr']*1 #convert W/m2/micron to erg/s/cm2/nm (HA, it's the same!)
+# #convert W/m2/micron to erg/s/cm2/nm (HA, it's the same!)
+# data['Irr'] = data['Irr']*1

 sun = Sed()
 sun.setSED(data["microns"] * 1e3, flambda=data["Irr"])

-# Match the wavelenth spacing and range to the ESO spectra
+# Match the wavelength spacing and range to the ESO spectra
 airglowSpec = np.load(os.path.join(dataDir, "ESO_Spectra/Airglow/airglowSpectra.npz"))
 sun.resampleSED(wavelen_match=airglowSpec["wave"])
diff --git a/rubin_sim/utils/__init__.py b/rubin_sim/utils/__init__.py
index 783608b51..873d45bd6 100644
--- a/rubin_sim/utils/__init__.py
+++ b/rubin_sim/utils/__init__.py
@@ -1,6 +1,6 @@
 import warnings

-from rubin_scheduler.utils import * #noqa: F403
+from rubin_scheduler.utils import *  # noqa: F403

 warnings.simplefilter("default")
 warnings.warn("rubin_sim.utils is deprecated, switch to rubin_scheduler.utils", DeprecationWarning)

From c1fa9f39eba9c8dc7c8089f6307bf0e2030528c7 Mon Sep 17 00:00:00 2001
From: Lynne Jones
Date: Sun, 14 Jan 2024 15:13:56 -0800
Subject: [PATCH 10/26] Typo in workflow

---
 .github/workflows/build_docs.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/build_docs.yaml b/.github/workflows/build_docs.yaml
index 71648c56d..9534b09ac 100644
--- a/.github/workflows/build_docs.yaml
+++ b/.github/workflows/build_docs.yaml
@@ -30,7 +30,7 @@ jobs:
         run: |
           mamba install --quiet --file=requirements.txt
           mamba install --quiet pip
-          pip install "documenteer[guild]"
+          pip install "documenteer[guide]"

       - name: install rubin_sim
         shell: bash -l {0}

From 9e8d5c4acd45e20356a9c1ae6b17da83039769c6 Mon Sep 17 00:00:00 2001
From: Lynne Jones
Date: Sun, 14 Jan 2024 23:07:30 -0800
Subject: [PATCH 11/26] Ruff and docstring updates

---
 rubin_sim/maf/maps/base_map.py                |   8 +-
 rubin_sim/maf/maps/create_gaia_density_map.py |  33 +++--
 rubin_sim/maf/maps/dust_map.py                |  27 ++--
 rubin_sim/maf/maps/dust_map_3d.py             |  84 +++++++-----
 rubin_sim/maf/maps/ebv_3d_hp.py               |  31 +++--
 rubin_sim/maf/maps/ebv_hp.py                  |  15 +-
 rubin_sim/maf/maps/gal_coords_map.py          |   2 +
 .../maf/maps/galactic_plane_priority_maps.py  |  74 ++++++----
 rubin_sim/maf/maps/stellar_density_map.py     |  20 ++-
 rubin_sim/maf/maps/trilegal_map.py            |  16 ++-
 rubin_sim/maf/metrics/simple_metrics.py       |  41 ++----
 rubin_sim/maf/utils/generate_fov_map.py       |  24 +---
 rubin_sim/maf/web/maf_run_results.py          | 128 ++++++++++++------
 rubin_sim/maf/web/maf_tracking.py             |  51 ++++---
 .../satellite_constellations/__init__.py      |   6 +-
 rubin_sim/selfcal/__init__.py                 |   8 +-
 rubin_sim/skybrightness/__init__.py           |  10 +-
 17 files changed, 331 insertions(+), 247 deletions(-)

diff --git a/rubin_sim/maf/maps/base_map.py b/rubin_sim/maf/maps/base_map.py
index 408fea651..deea89207 100644
--- a/rubin_sim/maf/maps/base_map.py
+++ b/rubin_sim/maf/maps/base_map.py
@@ -41,7 +41,7 @@ def help(cls, doc=False):


 class BaseMap(metaclass=MapsRegistry):
-    """ """
+    """Base class for maps."""

     def __init__(self, 
**kwargs): self.keynames = ["newkey"] @@ -65,8 +65,8 @@ def __ge__(self, othermap): return self.keynames >= othermap.keynames def run(self, slice_points): - """ - Given slice_points (dict containing metadata about each slice_point, including ra/dec), - adds additional metadata at each slice_point and returns updated dict. + """Given slice_points (dict containing metadata about each slice_point, + including ra/dec), adds additional metadata at each slice_point + and returns updated dict. """ raise NotImplementedError("This must be defined in subclass") diff --git a/rubin_sim/maf/maps/create_gaia_density_map.py b/rubin_sim/maf/maps/create_gaia_density_map.py index 03245e833..bdb2cf1b1 100755 --- a/rubin_sim/maf/maps/create_gaia_density_map.py +++ b/rubin_sim/maf/maps/create_gaia_density_map.py @@ -7,17 +7,21 @@ # Modifying createStarDensityMap to use GAIA DR1 catalog -# Use the catsim framework to loop over a healpy map and generate a stellar density map +# Use the catsim framework to loop over a healpy map and generate a +# stellar density map -# Connect to fatboy with: ssh -L 51433:fatboy.phys.washington.edu:1433 gateway.astro.washington.edu +# Connect to fatboy with: ssh -L 51433:fatboy.phys.washington.edu:1433 +# gateway.astro.washington.edu # If non-astro user, use simsuser@gateway.astro.washington.edu +# NOTE: fatboy is no longer operative + if __name__ == "__main__": # Hide imports here so documentation builds - from rubin_sim.catalogs.db import DBObject - from rubin_sim.utils import angular_separation, halfSpaceFromRaDec + from lsst.sims.catalogs.db import DBObject + from lsst.sims.utils import angular_separation, halfSpaceFromRaDec - # from rubin_sim.catalogs.generation.db import CatalogDBObject + # from lsst.sims.catalogs.generation.db import CatalogDBObject # Import the bits needed to get the catalog to work # from rubin_sim.catUtils.baseCatalogModels import * # from rubin_sim.catUtils.exampleCatalogDefinitions import * @@ -61,7 +65,7 @@ over_max_mask = data["over_max_mask"].copy() print("") - # Look at a cirular area the same area as the healpix it's centered on. + # Look at a circular area the same area as the healpix it's centered on. bound_length = hpsize_deg / np.pi**0.5 radius = bound_length @@ -76,12 +80,14 @@ chunk_size = 10000 for i in np.arange(indx_min, int(npix)): last_cp = "" - # wonder what the units of bound_length are...degrees! And it's a radius + # wonder what the units of bound_length are...degrees! + # And it's a radius # The newer interface: # obs_metadata = ObservationMetaData(bound_type='circle', ## pointing_ra=np.degrees(ra[i]), # pointing_dec=np.degrees(dec[i]), - # bound_length=bound_length, mjd=5700) + # bound_length=bound_length, + # mjd=5700) # t = dbobj.getCatalog('ref_catalog_star', obs_metadata=obs_metadata) hs = halfSpaceFromRaDec(ra[i], dec[i], radius) @@ -106,18 +112,21 @@ results = gaia_db.get_arbitrary_chunk_iterator(query, dtype=dtype, chunk_size=10000) result = list(results)[0] - distances = angular_separation(result["ra"], result["dec"], ra[i], dec[i]) # Degrees + distances = angular_separation(result["ra"], result["dec"], ra[i], dec[i]) result = result[np.where(distances < radius)] import pdb pdb.set_trace() - # I could think of setting the chunksize to something really large, then only doing one chunk? - # Or maybe setting up a way to break out of the loop if everything gets really dense? + # I could think of setting the chunksize to something really large, + # then only doing one chunk? 
+    # Or maybe setting up a way to break out of the loop if
+    # everything gets really dense?
         temp_hist = np.zeros(np.size(bins) - 1, dtype=float)
         counter = 0
+        col_name = "phot_g_mean_mag"
         for chunk in results:
-            chunk_hist, bins = np.histogram(chunk[colName], bins)
+            chunk_hist, bins = np.histogram(chunk[col_name], bins)
             temp_hist += chunk_hist
             counter += chunk_size
             if counter >= break_limit:
diff --git a/rubin_sim/maf/maps/dust_map.py b/rubin_sim/maf/maps/dust_map.py
index a1223312a..098e191b2 100644
--- a/rubin_sim/maf/maps/dust_map.py
+++ b/rubin_sim/maf/maps/dust_map.py
@@ -8,20 +8,24 @@

 class DustMap(BaseMap):
-    """
-    Compute the E(B-V) for each point in a given spatial distribution of slicePoints.
+    """Add the E(B-V) values to the slice points.
+
+    Primarily, this calls eb_vhp to read a healpix map of E(B-V) values over
+    the sky, then assigns ebv values to each slice_point.
+    If the slicer is a healpix slicer, this is trivial.
+    Otherwise, it either uses the nearest healpix grid point or interpolates.

-    Primarily, this calls eb_vhp to read a healpix map of E(B-V) values over the sky, then
-    assigns ebv values to each slice_point. If the slicer is a healpix slicer, this is trivial.
+    The key added to the slice points is `ebv`.

     Parameters
     ----------
     interp : `bool`, opt
-        Interpolate the dust map at each slice_point (True) or just use the nearest value (False).
+        Interpolate the dust map at each slice_point (True)
+        or just use the nearest value (False).
         Default is False.
     nside : `int`, opt
-        Default nside value to read the dust map from disk. Primarily useful if the slicer is not
-        a healpix slicer.
+        Default nside value to read the dust map from disk.
+        Primarily useful if the slicer is not a healpix slicer.
         Default 128.
     map_path : `str`, opt
         Define a path to the directory holding the dust map files.
     """

     def __init__(self, interp=False, nside=128, map_path=None):
-        """
-        interp: should the dust map be interpolated (True) or just use the nearest value (False).
-        """
         self.keynames = ["ebv"]
         self.interp = interp
         self.nside = nside
         self.map_path = map_path

     def run(self, slice_points):
-        # If the slicer has nside, it's a healpix slicer so we can read the map directly
+        # If the slicer has nside,
+        # it's a healpix slicer so we can read the map directly
         if "nside" in slice_points:
             if slice_points["nside"] != self.nside:
                 warnings.warn(
@@ -50,7 +52,8 @@
                 pixels=slice_points["sid"],
                 map_path=self.map_path,
             )
-        # Not a healpix slicer, look up values based on RA,dec with possible interpolation
+        # Not a healpix slicer,
+        # look up values based on RA,dec with possible interpolation
         else:
             slice_points["ebv"] = eb_vhp(
                 self.nside,
diff --git a/rubin_sim/maf/maps/dust_map_3d.py b/rubin_sim/maf/maps/dust_map_3d.py
index f88e95d39..74e489284 100644
--- a/rubin_sim/maf/maps/dust_map_3d.py
+++ b/rubin_sim/maf/maps/dust_map_3d.py
@@ -11,38 +11,48 @@

 class DustMap3D(BaseMap):
-    """The DustMap3D provides a `~rubin_sim.maf.map` to hold 3d EBV data.
-
-    Adds the following keys to the slicePoints:
-    ebv3d_dists - the distances from the 3d dust map at each slice_point (in pc)
-    ebv3d_ebvs - the E(B-V) values corresponding to each distance at each slice_point
-    ebv3d_ebv_at_<dist_pc> - the (single) ebv value at the nearest distance to dist_pc
-    ebv3d_dist_at_<d_mag> - the (single) distance value corresponding to where extinction and
-    distance modulus combine to create a m-Mo value of d_mag, for the filter specified in filtername (in pc).
-    Note that <dist_pc> and <d_mag> will be formatted with a single decimal place.
-
-    The additional method 'distance_at_mag' can be called either with the distances and ebv values for the
-    entire map or with the values from a single slice_point, in order to calculate the distance at which
-    extinction and distance modulus combine to create a m-Mo value closest to 'dmag' in any filter.
-    This is the same value as would be reported in ebv3d_dist_at_<d_mag>, but can be calculated on the fly,
+    """Add 3-d E(B-V) values to the slice points.
+
+    The slice point dictionary keys are expanded with the following keys:
+    `ebv3d_dists` -
+    the distances from the 3d dust map at each slice_point (in pc)
+    `ebv3d_ebvs` -
+    the E(B-V) values corresponding to each distance at each slice_point
+    `ebv3d_ebv_at_<dist_pc>` -
+    the (single) ebv value at the nearest distance to dist_pc
+    `ebv3d_dist_at_<d_mag>` -
+    the (single) distance value corresponding to where extinction and
+    distance modulus combine to create an m-Mo value of d_mag, for the filter
+    specified in filtername (in pc).
+    Note that <dist_pc> and <d_mag> will be formatted with a
+    single decimal place.
+
+    The additional method 'distance_at_dmag' can be called either with the
+    distances and ebv values for the entire map or with the values from a
+    single slice_point, in order to calculate the distance at which
+    extinction and distance modulus combine to create an m-Mo value closest
+    to 'dmag' in any filter. This is the same value as would be reported in
+    ebv3d_dist_at_<d_mag>, but can be calculated on the fly,
     allowing variable filters and dmag values.

     Parameters
     ----------
-    nside: `int`
-        Healpixel resolution (2^x).
+    nside : `int`
+        Healpixel resolution (2^x) to read from disk.
     map_file : `str`, opt
         Path to dust map file.
     interp : `bool`, opt
-        Should returned values be interpolated (True) or just nearest neighbor (False).
+        Should returned values be interpolated (True)
+        or just nearest neighbor (False).
         Default True, but is ignored if 'pixels' is provided.
-    filtername : 'str', opt
-        Name of the filter (to match the lsst filter names in rubin_sim.photUtils.DustValues)
-        in which to calculate dust extinction magnitudes
+    filtername : `str`, opt
+        Name of the filter (to match the lsst filter names in
+        rubin_sim.photUtils.DustValues) in which to calculate dust
+        extinction magnitudes
     dist_pc : `float`, opt
-        Distance at which to precalculate the nearest ebv value
+        Distance at which to precalculate the nearest ebv value (pc)
     d_mag : `float`, opt
-        Calculate the maximum distance which matches this 'd_mag'
+        Calculate the maximum distance which matches this `d_mag`
         d_mag == m-mO (dust extinction + distance modulus)
     r_x : `dict` {`str`: `float`}, opt
         Per-filter dust extinction curve coefficients.
@@ -65,10 +75,12 @@ def __init__(
         self.filtername = filtername
         self.dist_pc = dist_pc
         self.d_mag = d_mag
-        # r_x is the extinction coefficient (A_v = R_v * E(B-V) .. A_x = r_x * E(B-V)) per filter
-        # This is equivalent to calculating A_x (using rubin_sim.photUtils.Sed.addDust) in each
-        # filter and setting E(B-V) to 1 [so similar to the values calculated in DustValues ..
-        # we probably should rename those (from Ax1 to r_x)
+        # r_x is the extinction coefficient (A_v = R_v * E(B-V) ..
+        # A_x = r_x * E(B-V)) per filter
+        # This is equivalent to calculating A_x
+        # (using rubin_sim.photUtils.Sed.addDust) in each
+        # filter and setting E(B-V) to 1 (so similar to the values
+        # calculated in DustValues).
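+        # A short worked example of the relation above, as a hedged sketch
+        # (the 0.05 mag E(B-V) and 1 kpc values are purely illustrative):
+        #     a_g = self.r_x["g"] * 0.05           # g-band extinction, mags
+        #     dmod = 5.0 * np.log10(1000.0) - 5.0  # distance modulus at 1 kpc
+        #     m_minus_mo = dmod + a_g              # combined m-Mo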
if r_x is None:
             self.r_x = DustValues().r_x.copy()
         else:
             self.r_x = r_x
@@ -82,7 +94,8 @@ def __init__(
         ]

     def run(self, slice_points):
-        # If the slicer has nside, it's a healpix slicer so we can read the map directly
+        # If the slicer has nside,
+        # it's a healpix slicer so we can read the map directly
         if "nside" in slice_points:
             if slice_points["nside"] != self.nside:
                 warnings.warn(
@@ -94,7 +107,8 @@
                 pixels=slice_points["sid"],
                 map_file=self.map_file,
             )
-        # Not a healpix slicer, look up values based on RA,dec with possible interpolation
+        # Not a healpix slicer,
+        # look up values based on RA,dec with possible interpolation
         else:
             dists, ebvs = ebv_3d_hp(
                 self.nside,
@@ -107,7 +121,8 @@
         # Calculate the map ebv and dist values at the initialized distance
         dist_closest, ebv_at_dist = get_x_at_nearest_y(dists, ebvs, self.dist_pc)

-        # Calculate the distances at which m_minus_Mo values of 'dmag' are reached
+        # Calculate the distances at which m_minus_Mo values
+        # of 'dmag' are reached
         dist_dmag = self.distance_at_dmag(self.d_mag, dists, ebvs, self.filtername)

         slice_points["ebv3d_dists"] = dists
@@ -118,21 +133,26 @@
         return slice_points

     def distance_at_dmag(self, dmag, dists, ebvs, filtername=None):
-        # Provide this as a method which could be used for a single slice_point as well as for whole map
-        # (single slice_point means you could calculate this for any arbitrary magnitude or filter if needed)
+        # Provide this as a method which could be used for a single
+        # slice_point as well as for whole map
+        # (single slice_point means you could calculate this for any
+        # arbitrary magnitude or filter if needed)
+        # This method is here because some metrics require it.
         if filtername is None:
             filtername = self.filtername
         # calculate distance modulus for each distance
         dmods = 5.0 * np.log10(dists) - 5.0
         # calculate dust extinction at each distance, for the filtername
         a_x = self.r_x[filtername] * ebvs
-        # calculate the (m-Mo) = distmod + a_x -- combination of extinction due to distance and dust
+        # calculate the (m-Mo) = distmod + a_x -- combination of extinction
+        # due to distance and dust
         m_minus__mo = dmods + a_x

         # Maximum distance for the given m-Mo (dmag) value
         # first do the 'within the map' closest distance
         m_minus__mo_at_mag, dist_closest = get_x_at_nearest_y(m_minus__mo, dists, dmag)
-        # calculate distance modulus for an object with the maximum dust extinction (and then the distance)
+        # calculate distance modulus for an object with the maximum dust
+        # extinction (and then the distance)
         if a_x.ndim == 2:
             dist_mods_far = dmag - a_x[:, -1]
         else:
diff --git a/rubin_sim/maf/maps/ebv_3d_hp.py b/rubin_sim/maf/maps/ebv_3d_hp.py
index aed4571ea..ba0dfafb4 100644
--- a/rubin_sim/maf/maps/ebv_3d_hp.py
+++ b/rubin_sim/maf/maps/ebv_3d_hp.py
@@ -19,25 +19,31 @@ def ebv_3d_hp(
     pixels=None,
     interp=False,
 ):
-    """Reads and saves a 3d dust extinction file, return extinction at specified points (ra/dec/ or pixels).
+    """Reads and saves a 3d dust extinction file from disk, returning
+    extinction at specified points (ra/dec or pixels).

     Parameters
     ----------
     nside : `int`
         Healpixel resolution (2^x).
     map_file : `str`, opt
         Path to dust map file.
     ra : `np.ndarray` or `float`, opt
-        RA (can take numpy array). Default None sets up healpix array of nside. Radians.
+        RA (can take numpy array).
+        Default None sets up healpix array of nside. Radians.
     dec : `np.ndarray` or `float`, opt
-        Dec (can take numpy array). 
Default None set up healpix array of nside. Radians.
+        Dec (can take numpy array).
+        Default None sets up healpix array of nside. Radians.
     pixels : `np.ndarray`, opt
-        Healpixel IDs, to sub-select particular healpix points. Default uses all points.
+        Healpixel IDs, to sub-select particular healpix points.
+        Default uses all points.
         Easiest way to access healpix values.
-        Note that the pixels in the healpix array MUST come from a healpix grid with the same nside
-        as the ebv_3d_hp map. Using different nsides can potentially fail silently.
+        Note that the pixels in the healpix array MUST come from a
+        healpix grid with the same nside as the ebv_3d_hp map.
+        Using different nsides can potentially fail silently.
     interp : `bool`, opt
-        Should returned values be interpolated (True) or just nearest neighbor (False).
+        Should returned values be interpolated (True)
+        or just nearest neighbor (False).
         Default False.
     """
     if (ra is None) & (dec is None) & (pixels is None):
@@ -89,14 +95,16 @@ def ebv_3d_hp(
                 f"Will use nside from map data."
             )
             if pixels is not None:
-                # We're just going to raise an exception here because this could mean bad things.
+                # We're just going to raise an exception here
+                # because this could mean bad things.
                 raise ValueError(
                     f"Map nside {map_nside} did not match expected nside {nside}, "
                     f"and pixels provided; this can potentially indicate a serious "
                     f"error. Make nsides match or specify ra/dec instead of pixels."
                 )
             nside = map_nside
:       -        # Nested healpix data will not match the healpix arrays for the slicers (at this time)
+        # Nested healpix data will not match the healpix arrays
+        # for the slicers (at this time)
        if nested:
            warnings.warn("Map has nested (not ring order) data; will reorder.")
            for i in np.arange(0, len(dists[0])):
@@ -140,7 +148,7 @@ def get_x_at_nearest_y(x, y, x_goal):
        the x at a single point of the map (1d array)
    y : `np.array`
        Can be either a map with y at each point in the map (2d array) or
-        the y at a single point of the map (1d array) - but should match x dimensionality
+        the y at a single point of the map (1d array) -
+        but should match x dimensionality
    x_goal : `float'
        The goal x value to look for the nearest y value
diff --git a/rubin_sim/maf/maps/ebv_hp.py b/rubin_sim/maf/maps/ebv_hp.py
index 56b115f8b..89d1b5564 100644
--- a/rubin_sim/maf/maps/ebv_hp.py
+++ b/rubin_sim/maf/maps/ebv_hp.py
@@ -10,22 +10,25 @@

 def eb_vhp(nside, ra=None, dec=None, pixels=None, interp=False, map_path=None):
-    """
-    Read in a healpix dust map and return values for given RA, Dec values.
+    """Read in a healpix dust map and return values for given RA, Dec values.

     This is primarily a tool for the rubin_sim.maf.DustMap class.

     nside : `int`
         Healpixel resolution (2^x).
     ra : `np.ndarray` or `float`, opt
-        RA (can take numpy array). Default None sets up healpix array of nside. Radians.
+        RA (can take numpy array).
+        Default None sets up healpix array of nside. Radians.
     dec : `np.ndarray` or `float`, opt
-        Dec (can take numpy array). Default None set up healpix array of nside. Radians.
+        Dec (can take numpy array).
+        Default None sets up healpix array of nside. Radians.
     pixels : `np.ndarray`, opt
-        Healpixel IDs, to sub-select particular healpix points. Default uses all points.
+        Healpixel IDs, to sub-select particular healpix points.
+        Default uses all points.
         NOTE - to use a healpix map, set pixels and not ra/dec.
interp : `bool`, opt
-        Should returned values be interpolated (True) or just nearest neighbor (False)
+        Should returned values be interpolated (True)
+        or just nearest neighbor (False)
     map_path : `str`, opt
         Path to directory containing dust map files.
     """
diff --git a/rubin_sim/maf/maps/gal_coords_map.py b/rubin_sim/maf/maps/gal_coords_map.py
index 6e7ecfe22..3f7bf050d 100644
--- a/rubin_sim/maf/maps/gal_coords_map.py
+++ b/rubin_sim/maf/maps/gal_coords_map.py
@@ -7,6 +7,8 @@

 class GalCoordsMap(BaseMap):
+    """Add `gall` and `galb` (in radians) to the slice point dictionaries."""
+
     def __init__(self):
         self.keynames = ["gall", "galb"]

diff --git a/rubin_sim/maf/maps/galactic_plane_priority_maps.py b/rubin_sim/maf/maps/galactic_plane_priority_maps.py
index 0cb2366a1..d05b53e45 100644
--- a/rubin_sim/maf/maps/galactic_plane_priority_maps.py
+++ b/rubin_sim/maf/maps/galactic_plane_priority_maps.py
@@ -17,7 +17,8 @@

 def gp_priority_map_components_to_keys(filtername, science_map):
-    """A convenience function to make keeping the map key formats in sync in various places"""
+    """A convenience function to help keep the map key
+    formats in sync in various places"""
     return f"galplane_priority_{science_map}:{filtername}"


@@ -40,33 +41,45 @@ def galplane_priority_map(

     Parameters
     ----------
-    nside: `int`
+    nside : `int`
         Healpixel resolution (2^x). At present, this must be 64.
     get_keys : `bool`, opt
-        Set this to True to retrieve *only* the keys (such as the science map names) for the maps.
+        Set this to True to retrieve *only* the keys
+        (such as the science map names) for the maps.
         Default False.
     ra : `np.ndarray` or `float`, opt
-        RA (can take numpy array). Default None sets up healpix array of nside. Radians.
+        RA (can take numpy array).
+        Default None sets up healpix array of nside. Radians.
     dec : `np.ndarray` or `float`, opt
-        Dec (can take numpy array). Default None set up healpix array of nside. Radians.
+        Dec (can take numpy array).
+        Default None sets up healpix array of nside. Radians.
     pixels : `np.ndarray`, opt
-        Healpixel IDs, to sub-select particular healpix points. Default uses all points.
+        Healpixel IDs, to sub-select particular healpix points.
+        Default uses all points.
         Easiest way to access healpix values.
-        Note that the pixels in the healpix array MUST come from a healpix grid with the same nside
-        as the galactic plane priority map. Using different nsides can potentially fail silently.
+        Note that the pixels in the healpix array MUST come from a
+        healpix grid with the same nside as the galactic plane priority map.
+        Using different nsides can potentially fail silently.
     interp : `bool`, opt
-        Should returned values be interpolated (True) or just nearest neighbor (False).
+        Should returned values be interpolated (True)
+        or just nearest neighbor (False).
         Default False.
     map_path : `str`, opt
-        Path to directory containing dust map files. Default None, uses $RUBIN_SIM_DATA_DIR/maps.
+        Path to directory containing dust map files.
+        Default None, uses $RUBIN_SIM_DATA_DIR/maps.
     use_alt_maps : `bool`, opt
-        Use the priority_GalPlane_footprint_alt_map_data_{ugrizysum}.fits files instead of the default
-        priority_galPlane_footprint_map_data_{ugrizysum}.fits files. Default False.
+        Use the priority_GalPlane_footprint_alt_map_data_{ugrizysum}.fits
+        files instead of the default
+        priority_galPlane_footprint_map_data_{ugrizysum}.fits files.
+        Default False.
""" - # This is a function that will read the galactic plane priority map data and hold onto it indefinitely - # this also lets us use a range of slicers, as it will set the slice_point data appropriately. + # This is a function that will read the galactic plane priority map data + # and hold onto it indefinitely + # this also lets us use a range of slicers, as it will set the slice_point + # data appropriately. - # This function's primary goal is to return this information to the map, to use for the slicer. + # This function's primary goal is to return this information to the map, + # to use for the slicer. # So you MUST specify ra/dec or pixels -- or only retireve the keys if get_keys is False: if (ra is None) & (dec is None) & (pixels is None): @@ -74,8 +87,9 @@ def galplane_priority_map( # This reads and stores the galactic plane priority maps # The galactic plane priority maps are only available in nside 64 - # There are several different versions of the map - but we will almost always - # run all of the galactic plane metrics together, so we'll just read them all at once here + # There are several different versions of the map - + # but we will almost always run all of the galactic plane metrics + # together, so we'll just read them all at once here if nside != 64: raise RuntimeError("Currently only available with nside=64") @@ -134,21 +148,24 @@ def galplane_priority_map( class GalacticPlanePriorityMap(BaseMap): - """ - Read and return the galactic plane priority map data at each slice_point. + """Add the galactic plane priority map data to the slice points. - Primarily, this calls galactic_plane_priority_map to read the map data, and then assigns - the appropriate values to each slice_point. If the slicer is an nside=64 healpix slicer, this is trivial. + This calls galactic_plane_priority_map to read the map data, and then + assigns the appropriate values to each slice_point. + If the slicer is an nside=64 healpix slicer, this is trivial. (other use-cases currently experimental and not supported). + Add keys corresponding to each of the galplane priority map elements. + Parameters ---------- interp : `bool`, opt - Interpolate the dust map at each slice_point (True) or just use the nearest value (False). + Interpolate the dust map at each slice_point (True) + or just use the nearest value (False). Default is False. nside : `int`, opt - Default nside value to read the dust map from disk. Primarily useful if the slicer is not - a healpix slicer. + Default nside value to read the dust map from disk. + Primarily useful if the slicer is not a healpix slicer. Default 64. map_path : `str`, opt Define a path to the directory holding the dust map files. @@ -156,16 +173,14 @@ class GalacticPlanePriorityMap(BaseMap): """ def __init__(self, interp=False, nside=64, map_path=None): - """ - interp: should the dust map be interpolated (True) or just use the nearest value (False). 
- """ self.keynames = galplane_priority_map(get_keys=True) self.interp = interp self.nside = nside self.map_path = map_path def run(self, slice_points): - # If the slicer has nside, it's a healpix slicer so we can read the map directly + # If the slicer has nside, + # it's a healpix slicer so we can read the map directly if "nside" in slice_points: if slice_points["nside"] != self.nside: warnings.warn( @@ -179,7 +194,8 @@ def run(self, slice_points): ) for key in self.keynames: slice_points[key] = maps[key] - # Not a healpix slicer, look up values based on RA,dec with possible interpolation + # Not a healpix slicer, look up values based on RA,dec + # with possible interpolation else: maps = galplane_priority_map( self.nside, diff --git a/rubin_sim/maf/maps/stellar_density_map.py b/rubin_sim/maf/maps/stellar_density_map.py index 522275290..d2f4c4ad5 100644 --- a/rubin_sim/maf/maps/stellar_density_map.py +++ b/rubin_sim/maf/maps/stellar_density_map.py @@ -12,16 +12,24 @@ class StellarDensityMap(BaseMap): - """ - Return the cumulative stellar luminosity function for each slice_point. Units of stars per sq degree. - Uses a healpix map of nside=64. Uses the nearest healpix point for other ra,dec values. + """Read and hold the cumulative stellar luminosity function for + each slice point. + + The underlying stellar luminosity function map is nside = 64, and contains + stars per sq degree at a series of magnitudes (the map contains + `starLumFunc_` and `starMapBins_`). + For slice points which do not match nside=64, the map uses the nearest + healpix point on the nside=64 grid. + + The stellar luminosity function comes from the GalFast model. Parameters ---------- startype : `str` ('allstars', 'wdstars') - Load the luminosity function for all stars ('allstars'), which includes main-sequence stars - white dwarfs, blue horozontal branch, RR Lyrae, and Cepheids. The 'wdstars' option only includes - white dwarf stars. + Load the luminosity function for all stars ('allstars'), + which includes main-sequence stars + white dwarfs, blue horozontal branch, RR Lyrae, and Cepheids. + The 'wdstars' option only includes white dwarf stars. filtername : `str` Filter to use. Options of u,g,r,i,z,y """ diff --git a/rubin_sim/maf/maps/trilegal_map.py b/rubin_sim/maf/maps/trilegal_map.py index 6b321404d..ab2c7dd37 100644 --- a/rubin_sim/maf/maps/trilegal_map.py +++ b/rubin_sim/maf/maps/trilegal_map.py @@ -13,8 +13,17 @@ class TrilegalDensityMap(BaseMap): - """ - Return the cumulative stellar luminosity function for each slice_point. Units of stars per sq degree. + """Read and hold the cumulative stellar luminosity function for + each slice point. + + The underlying stellar luminosity function map is available in a + variety of nsides, and contains + stars per sq degree at a series of magnitudes (the map contains + `starLumFunc_` and `starMapBins_`). + For slice points which do not match one of the native nside options, + the map uses the nearest healpix point on the specified nside grid. + + The stellar luminosity function comes from the TRILEGAL model. Parameters ---------- @@ -48,7 +57,8 @@ def _read_map(self): self.star_map = star_map["starDensity"].copy() self.star_map_bins = star_map["bins"].copy() self.starmap_nside = hp.npix2nside(np.size(self.star_map[:, 0])) - # note, the trilegal maps are in galactic coordinates, and nested healpix. + # note, the trilegal maps are in galactic coordinates + # and use nested healpix. 
gal_l, gal_b = _hpid2_ra_dec(self.nside, np.arange(hp.nside2npix(self.nside)), nest=True) # Convert that to RA,dec. Then do nearest neighbor lookup. diff --git a/rubin_sim/maf/metrics/simple_metrics.py b/rubin_sim/maf/metrics/simple_metrics.py index c861514cc..134f12cad 100644 --- a/rubin_sim/maf/metrics/simple_metrics.py +++ b/rubin_sim/maf/metrics/simple_metrics.py @@ -77,8 +77,9 @@ class Coaddm5Metric(BaseMetric): Name of the filter column. """ - def __init__(self, m5_col="fiveSigmaDepth", metric_name="CoaddM5", - filter_name=None, filter_col="Filter", **kwargs): + def __init__( + self, m5_col="fiveSigmaDepth", metric_name="CoaddM5", filter_name=None, filter_col="Filter", **kwargs + ): self.filter_name = filter_name self.filter_col = filter_col self.m5_col = m5_col @@ -100,48 +101,42 @@ def run(self, data_slice, slice_point=None): class MaxMetric(BaseMetric): - """Calculate the maximum of a simData column slice. - """ + """Calculate the maximum of a simData column slice.""" def run(self, data_slice, slice_point=None): return np.max(data_slice[self.colname]) class AbsMaxMetric(BaseMetric): - """Calculate the max of the absolute value of a simData column slice. - """ + """Calculate the max of the absolute value of a simData column slice.""" def run(self, data_slice, slice_point=None): return np.max(np.abs(data_slice[self.colname])) class MeanMetric(BaseMetric): - """Calculate the mean of a simData column slice. - """ + """Calculate the mean of a simData column slice.""" def run(self, data_slice, slice_point=None): return np.mean(data_slice[self.colname]) class AbsMeanMetric(BaseMetric): - """Calculate the mean of the absolute value of a simData column slice. - """ + """Calculate the mean of the absolute value of a simData column slice.""" def run(self, data_slice, slice_point=None): return np.mean(np.abs(data_slice[self.colname])) class MedianMetric(BaseMetric): - """Calculate the median of a simData column slice. - """ + """Calculate the median of a simData column slice.""" def run(self, data_slice, slice_point=None): return np.median(data_slice[self.colname]) class AbsMedianMetric(BaseMetric): - """Calculate the median of the absolute value of a simData column slice. - """ + """Calculate the median of the absolute value of a simData column slice.""" def run(self, data_slice, slice_point=None): return np.median(np.abs(data_slice[self.colname])) @@ -295,8 +290,7 @@ def run(self, data_slice, slice_point=None): class MaxPercentMetric(BaseMetric): - """Return the percent of data which matches the maximum value of the data. - """ + """Return the percent of data which matches the maximum value of the data.""" def run(self, data_slice, slice_point=None): n_max = np.size(np.where(data_slice[self.colname] == np.max(data_slice[self.colname]))[0]) @@ -317,8 +311,7 @@ def run(self, data_slice, slice_point=None): class BinaryMetric(BaseMetric): - """Return 1 if there is data, `badval` otherwise. - """ + """Return 1 if there is data, `badval` otherwise.""" def run(self, data_slice, slice_point=None): if data_slice.size > 0: @@ -328,8 +321,7 @@ def run(self, data_slice, slice_point=None): class FracAboveMetric(BaseMetric): - """Find the fraction of data values above a given `cutoff`. 
- """ + """Find the fraction of data values above a given `cutoff`.""" def __init__(self, col=None, cutoff=0.5, scale=1, metric_name=None, **kwargs): # Col could just get passed in bundle with kwargs, but by explicitly pulling it out @@ -348,8 +340,7 @@ def run(self, data_slice, slice_point=None): class FracBelowMetric(BaseMetric): - """Find the fraction of data values below a given `cutoff`. - """ + """Find the fraction of data values below a given `cutoff`.""" def __init__(self, col=None, cutoff=0.5, scale=1, metric_name=None, **kwargs): if metric_name is None: @@ -366,8 +357,7 @@ def run(self, data_slice, slice_point=None): class PercentileMetric(BaseMetric): - """Find the value of a column at a given `percentile`. - """ + """Find the value of a column at a given `percentile`.""" def __init__(self, col=None, percentile=90, metric_name=None, **kwargs): if metric_name is None: @@ -519,8 +509,7 @@ def run(self, data_slice, slice_point=None): class RealMeanMetric(BaseMetric): - """Calculate the mean of a column with no nans or infs. - """ + """Calculate the mean of a column with no nans or infs.""" def run(self, data_slice, slice_point=None): return np.mean(data_slice[self.colname][np.isfinite(data_slice[self.colname])]) diff --git a/rubin_sim/maf/utils/generate_fov_map.py b/rubin_sim/maf/utils/generate_fov_map.py index a29ea9d80..8c4ca8919 100644 --- a/rubin_sim/maf/utils/generate_fov_map.py +++ b/rubin_sim/maf/utils/generate_fov_map.py @@ -1,30 +1,8 @@ import numpy as np +from rubin_scheduler.utils import gnomonic_project_toxy, gnomonic_project_tosky # Use the main stack to make a rough array. - -# Need to put these in sims_utils and remove from MAF and scheduler. - - -def gnomonic_project_toxy(ra1, dec1, r_acen, deccen): - """Calculate x/y projection of ra1/dec1 in system with center at r_acen, deccen. - Input radians. Grabbed from sims_selfcal""" - # also used in Global Telescope Network website - cosc = np.sin(deccen) * np.sin(dec1) + np.cos(deccen) * np.cos(dec1) * np.cos(ra1 - r_acen) - x = np.cos(dec1) * np.sin(ra1 - r_acen) / cosc - y = (np.cos(deccen) * np.sin(dec1) - np.sin(deccen) * np.cos(dec1) * np.cos(ra1 - r_acen)) / cosc - return x, y - - -def gnomonic_project_tosky(x, y, r_acen, deccen): - """Calculate RA/dec on sky of object with x/y and RA/Cen of field of view. - Returns Ra/dec in radians.""" - denom = np.cos(deccen) - y * np.sin(deccen) - RA = r_acen + np.arctan2(x, denom) - dec = np.arctan2(np.sin(deccen) + y * np.cos(deccen), np.sqrt(x * x + denom * denom)) - return RA, dec - - if __name__ == "__main__": import lsst.sims.utils as simsUtils from lsst.obs.lsst import LsstCamMapper diff --git a/rubin_sim/maf/web/maf_run_results.py b/rubin_sim/maf/web/maf_run_results.py index 4fef45399..d4fd42f5b 100644 --- a/rubin_sim/maf/web/maf_run_results.py +++ b/rubin_sim/maf/web/maf_run_results.py @@ -11,19 +11,24 @@ class MafRunResults: - """ - Class to read MAF's resultsDb_sqlite.db and organize the output for display on web pages. + """Read and serve the MAF resultsDb_sqlite.db database for the + show_maf jinja2 templates. Deals with a single MAF run (one output directory, one results_db) only. + + Parameters + ---------- + out_dir : `str` + The location of the results database for this run. + run_name : `str`, optional + The name of the opsim run. + If None, simply stays blank on show_maf display pages. + results_db : `str`, optional + The path to the sqlite database in `out_dir`. + If None, uses the default of `resultsDb_sqlite.db`. 
""" def __init__(self, out_dir, run_name=None, results_db=None): - """ - Instantiate the (individual run) layout visualization class. - - This class provides methods used by our jinja2 templates to help interact - with the outputs of MAF. - """ self.out_dir = os.path.relpath(out_dir, ".") self.run_name = run_name @@ -90,6 +95,18 @@ def convert_select_to_metrics(self, group_list, metric_id_list): """ Convert the lists of values returned by 'select metrics' template page into an appropriate dataframe of metrics (in sorted order). + + Parameters + ---------- + group_list : `list` [`str`] + The groups of metrics to show on the show_maf pages. + metric_id_list : `list` [`int`] + The integer ids of the metrics in the sqlite results database. + + Returns + ------- + metrics : `np.ndarray`, (N,) + An array of the metric information for the metrics . """ metric_ids = set() for group_subgroup in group_list: @@ -141,9 +158,8 @@ def get_npz(self, metric): def get_results_db(self): """ - Return the summary results sqlite filename. - - Note that this assumes the resultsDB is stored in 'resultsDB_sqlite.db'. + Return the summary results sqlite filename, as long as the + results data is named `resultsDb_sqlite.db`. """ return os.path.join(self.out_dir, "resultsDb_sqlite.db") @@ -188,7 +204,8 @@ def sort_metrics( """ Sort the metrics by order specified by 'order'. - Default is to sort by group, subgroup, metric name, slicer, display order, then info_label. + Default is to sort by group, subgroup, metric name, slicer, + display order, then info_label. Returns sorted numpy array. """ if len(metrics) > 0: @@ -197,7 +214,8 @@ def sort_metrics( def metrics_in_group(self, group, metrics=None, sort=True): """ - Given a group, return the metrics belonging to this group, in display order. + Given a group, return the metrics belonging to this group, + in display order. """ if metrics is None: metrics = self.metrics @@ -208,8 +226,8 @@ def metrics_in_group(self, group, metrics=None, sort=True): def metrics_in_subgroup(self, group, subgroup, metrics=None): """ - Given a group and subgroup, return a dataframe of the metrics belonging to these - group/subgroups, in display order. + Given a group and subgroup, return a dataframe of the metrics + belonging to these group/subgroups, in display order. If 'metrics' is provided, then only consider this subset of metrics. """ @@ -221,7 +239,8 @@ def metrics_in_subgroup(self, group, subgroup, metrics=None): def metrics_to_subgroups(self, metrics): """ - Given an array of metrics, return an ordered dict of their group/subgroups. + Given an array of metrics, return an ordered dict of their + group/subgroups. """ group_list = sorted(np.unique(metrics["display_group"])) groups = OrderedDict() @@ -232,7 +251,8 @@ def metrics_to_subgroups(self, metrics): def metrics_with_plot_type(self, plot_type="SkyMap", metrics=None): """ - Return an array of metrics with plot=plot_type (optional, metric subset). + Return an array of metrics with plot=plot_type + (optionally also within a metric subset). """ # Allow some variation in plot_type names for backward compatibility, # even if plot_type is a list. @@ -249,13 +269,15 @@ def metrics_with_plot_type(self, plot_type="SkyMap", metrics=None): metrics = self.metrics # Identify the plots with the right plot_type, get their IDs. plot_match = self.plots[np.in1d(self.plots["plot_type"], plot_types)] - # Convert those potentially matching metricIds to metrics, using the subset info. 
+ # Convert those potentially matching metricIds to metrics, + # using the subset info. metrics = self.metric_ids_to_metrics(plot_match["metric_id"], metrics) return metrics def unique_metric_names(self, metrics=None, baseonly=True): """ - Return a list of the unique metric names, preserving the order of 'metrics'. + Return a list of the unique metric names, + preserving the order of 'metrics'. """ if metrics is None: metrics = self.metrics @@ -268,7 +290,8 @@ def unique_metric_names(self, metrics=None, baseonly=True): def metrics_with_summary_stat(self, summary_stat_name="Identity", metrics=None): """ - Return metrics with summary stat matching 'summary_stat_name' (optional, metric subset). + Return metrics with summary stat matching 'summary_stat_name' + (optionally, within a metric subset). """ if metrics is None: metrics = self.metrics @@ -276,7 +299,8 @@ def metrics_with_summary_stat(self, summary_stat_name="Identity", metrics=None): stats = self.stats[np.in1d(self.stats["summary_metric"], summary_stat_name)] # Identify the subset of relevant metrics. metrics = self.metric_ids_to_metrics(stats["metric_id"], metrics) - # Re-sort metrics because at this point, probably want displayOrder + info_label before metric name. + # Re-sort metrics because at this point, probably want displayOrder + # + info_label before metric name. metrics = self.sort_metrics( metrics, order=[ @@ -321,7 +345,8 @@ def unique_slicer_names(self, metrics=None): def metrics_with_slicer(self, slicer, metrics=None): """ - For an array of metrics, return the subset which match a particular 'slicername' value. + For an array of metrics, return the subset which match a + particular 'slicername' value. """ if metrics is None: metrics = self.metrics @@ -330,7 +355,8 @@ def metrics_with_slicer(self, slicer, metrics=None): def unique_metric_name_and_info_label(self, metrics=None): """ - For an array of metrics, return the unique metric names + info_label combo in same order. + For an array of metrics, return the unique metric names + + info_label combo in same order. """ if metrics is None: metrics = self.metrics @@ -351,7 +377,8 @@ def unique_metric_info_label(self, metrics=None): def metrics_with_info_label(self, info_label, metrics=None): """ - For an array of metrics, return the subset which match a particular 'info_label' value. + For an array of metrics, return the subset which match a + particular 'info_label' value. """ if metrics is None: metrics = self.metrics @@ -360,7 +387,8 @@ def metrics_with_info_label(self, info_label, metrics=None): def metrics_with_metric_name(self, metric_name, metrics=None, baseonly=True): """ - Return all metrics which match metric_name (default, only the 'base' metric name). + Return all metrics which match metric_name + (default, only the 'base' metric name). """ if metrics is None: metrics = self.metrics @@ -419,10 +447,12 @@ def plots_for_metric(self, metric): def plot_dict(self, plots=None): """ Given an array of plots (for a single metric usually). - Returns an ordered dict with 'plot_type' for interfacing with jinja2 templates. + Returns an ordered dict with 'plot_type' for interfacing with + jinja2 templates. plot_dict == {'SkyMap': {'plot_file': [], 'thumb_file', []}, 'Histogram': {}..} - If no plot of a particular type, the plot_file and thumb_file are empty lists. + If no plot of a particular type, the plot_file and thumb_file + are empty lists. Calling with plots=None returns a blank plot_dict. 
""" plot_dict = OrderedDict() @@ -475,10 +505,14 @@ def order_plots(self, sky_plots): Returns an ordered list of plotDicts. - The goal is to lay out the skymaps in a 3x2 grid on the MultiColor page, in ugrizy order. - If a plot for a filter is missing, add a gap. (i.e. if there is no u, keep a blank spot). - If there are other plots, with multiple filters or no filter info, they are added to the end. - If sky_plots includes multiple plots in the same filter, just goes back to displayOrder. + The goal is to lay out the skymaps in a 3x2 grid on the MultiColor + page, in ugrizy order. + If a plot for a filter is missing, add a gap. (i.e. if there is no + u band plot, keep a blank spot). + If there are other plots, with multiple filters or no filter + info, they are added to the end. + If sky_plots includes multiple plots in the same filter, + just goes back to displayOrder. """ ordered_sky_plots = [] if len(sky_plots) == 0: @@ -493,13 +527,13 @@ def order_plots(self, sky_plots): pattern = "_" + f + "_" matches = np.array([bool(re.search(pattern, x)) for x in sky_plots["plot_file"]]) match_sky_plot = sky_plots[matches] - # in pandas: match_sky_plot = sky_plots[sky_plots.plot_file.str.contains(pattern)] if len(match_sky_plot) == 1: ordered_sky_plots.append(self.plot_dict(match_sky_plot)) elif len(match_sky_plot) == 0: ordered_sky_plots.append(blank_plot_dict) else: - # If we found more than one plot in the same filter, we just go back to displayOrder. + # If we found more than one plot in the same filter, + # we just go back to displayOrder. too_many_plots = True break @@ -529,7 +563,8 @@ def order_plots(self, sky_plots): def get_sky_maps(self, metrics=None, plot_type="SkyMap"): """ - Return a numpy array of the plots with plot_type=plot_type, optionally for subset of metrics. + Return a numpy array of the plots with plot_type=plot_type, + optionally for subset of metrics. """ if metrics is None: metrics = self.metrics @@ -543,7 +578,8 @@ def get_sky_maps(self, metrics=None, plot_type="SkyMap"): def stats_for_metric(self, metric, stat_name=None): """ - Return a numpy array of summary statistics which match a given metric(s). + Return a numpy array of summary statistics which match a + given metric(s). Optionally specify a particular stat_name that you want to match. """ @@ -554,25 +590,29 @@ def stats_for_metric(self, metric, stat_name=None): def stat_dict(self, stats): """ - Returns an ordered dictionary with statName:statValue for an array of stats. + Returns an ordered dictionary with statName:statValue + for an array of stats. - Note that if you pass 'stats' from multiple metrics with the same summary names, they - will be overwritten in the resulting dictionary! + Note that if you pass 'stats' from multiple metrics with the same + summary names, they will be overwritten in the resulting dictionary! So just use stats from one metric, with unique summary_metric names. """ - # Result = dict with key == summary stat name, value = summary stat value. + # Result = dict with key + # == summary stat name, value = summary stat value. sdict = OrderedDict() statnames = self.order_stat_names(stats) for n in statnames: match = stats[np.where(stats["summary_metric"] == n)] - # We're only going to look at the first value; and this should be a float. + # We're only going to look at the first value; + # and this should be a float. 
sdict[n] = match["summary_value"][0] return sdict def order_stat_names(self, stats): """ - Given an array of stats, return a list containing all the unique 'summary_metric' names - in a default ordering (identity-count-mean-median-rms..). + Given an array of stats, return a list containing all the unique + 'summary_metric' names in a default ordering + (identity-count-mean-median-rms..). """ names = list(np.unique(stats["summary_metric"])) # Add some default sorting: @@ -587,8 +627,8 @@ def order_stat_names(self, stats): def all_stat_names(self, metrics): """ - Given an array of metrics, return a list containing all the unique 'summary_metric' names - in a default ordering. + Given an array of metrics, return a list containing all the + unique 'summary_metric' names in a default ordering. """ names = np.unique( self.stats["summary_metric"][np.in1d(self.stats["metric_id"], metrics["metric_id"])] diff --git a/rubin_sim/maf/web/maf_tracking.py b/rubin_sim/maf/web/maf_tracking.py index 9fcc96aef..24b12ae81 100644 --- a/rubin_sim/maf/web/maf_tracking.py +++ b/rubin_sim/maf/web/maf_tracking.py @@ -11,21 +11,18 @@ class MafTracking: - """ - Class to read MAF's tracking SQLite database (tracking a set of MAF runs) - and handle the output for web display. + """Hold and serve the MAF tracking (sqlite) database content for + the show_maf web server. + + Parameters + ---------- + database : `str`, optional + Path to the sqlite tracking database file. + If None, looks for `trackingDb_sqlite.db` default file in the + current directory. """ def __init__(self, database=None): - """ - Instantiate the (multi-run) layout visualization class. - - Parameters - ---------- - database :str - Path to the sqlite tracking database file. - If not set, looks for 'trackingDb_sqlite.db' file in current directory. - """ if database is None: database = os.path.join(os.getcwd(), "trackingDb_sqlite.db") @@ -48,13 +45,12 @@ def __init__(self, database=None): self.runs_page = {} def run_info(self, run): - """ - Provide the tracking database information relevant for a given run in a format - that the jinja2 templates can use. + """Get the tracking database information relevant for a given run + in a format that the jinja2 templates for show_maf can use. Parameters ---------- - run : `numpy.NDarray` + run : `np.ndarray`, (1,) One line from self.runs Returns @@ -80,14 +76,13 @@ def run_info(self, run): return runInfo def sort_runs(self, runs, order=["run_name", "maf_comment", "maf_run_id"]): - """ - Sort the numpy array of run data. + """Sort the numpy array of run data. Parameters ---------- - runs : `numpy.NDarray` + runs : `np.ndarray`, (N,) The runs from self.runs to sort. - order : `list` + order : `list` [`str`] The fields to use to sort the runs array. Returns @@ -98,21 +93,23 @@ def sort_runs(self, runs, order=["run_name", "maf_comment", "maf_run_id"]): return np.sort(runs, order=order) def get_run(self, maf_run_id): - """ - Set up a mafRunResults object to read and handle the data from an individual run. - Caches the mafRunResults object, meaning the metric information from a particular run - is only read once from disk. + """Set up a mafRunResults object to read and handle the data + from a single individual run. + Caches the mafRunResults object, meaning the metric information from + a particular run is only read once from disk. Parameters ---------- maf_run_id : `int` - maf_run_id value in the tracking database corresponding to a particular MAF run. 
+ maf_run_id value in the tracking database + corresponding to a particular MAF run. Returns ------- runPage : `MafRunResults` - A MafRunResults object containing the information about a particular run. - Stored internally in self.runs_page dict, but also passed back to the tornado server. + A MafRunResults object containing the information for this run. + Stored internally in self.runs_page dict, but also passed + back to the tornado server. """ if not isinstance(maf_run_id, int): if isinstance(maf_run_id, dict): diff --git a/rubin_sim/satellite_constellations/__init__.py b/rubin_sim/satellite_constellations/__init__.py index fdec3660d..502d4d461 100644 --- a/rubin_sim/satellite_constellations/__init__.py +++ b/rubin_sim/satellite_constellations/__init__.py @@ -1,3 +1,3 @@ -from .basis_function import * # noqa: F403 -from .model_observatory import * # noqa: F403 -from .sat_utils import * # noqa: F403 +from .basis_function import * +from .model_observatory import * +from .sat_utils import * diff --git a/rubin_sim/selfcal/__init__.py b/rubin_sim/selfcal/__init__.py index 91e0e95c5..72858efe6 100644 --- a/rubin_sim/selfcal/__init__.py +++ b/rubin_sim/selfcal/__init__.py @@ -1,4 +1,4 @@ -from .generate_catalog import * # noqa: F403 -from .offsets import * # noqa: F403 -from .solver import * # noqa: F403 -from .star_tools import * # noqa: F403 +from .generate_catalog import * +from .offsets import * +from .solver import * +from .star_tools import * diff --git a/rubin_sim/skybrightness/__init__.py b/rubin_sim/skybrightness/__init__.py index 637b9806a..fd1248d6b 100644 --- a/rubin_sim/skybrightness/__init__.py +++ b/rubin_sim/skybrightness/__init__.py @@ -1,5 +1,5 @@ -from .allsky_db import * # noqa: F403 -from .interp_components import * # noqa: F403 -from .sky_model import * # noqa: F403 -from .twilight_func import * # noqa: F403 -from .utils import * # noqa: F403 +from .allsky_db import * +from .interp_components import * +from .sky_model import * +from .twilight_func import * +from .utils import * From fb763f920be3fbf69f59048750f8708a141cb943 Mon Sep 17 00:00:00 2001 From: Lynne Jones Date: Sun, 14 Jan 2024 23:12:19 -0800 Subject: [PATCH 12/26] isort fixes --- rubin_sim/data/rs_download_data.py | 2 +- rubin_sim/maf/utils/generate_fov_map.py | 2 +- rubin_sim/skybrightness/generate_hdf5.py | 2 +- tests/phot_utils/test_snr.py | 3 +-- 4 files changed, 4 insertions(+), 5 deletions(-) diff --git a/rubin_sim/data/rs_download_data.py b/rubin_sim/data/rs_download_data.py index fd186b032..2c9b4e7a9 100644 --- a/rubin_sim/data/rs_download_data.py +++ b/rubin_sim/data/rs_download_data.py @@ -3,8 +3,8 @@ import argparse from rubin_scheduler.data import DEFAULT_DATA_URL, download_rubin_data -from rubin_scheduler.data import get_data_dir as gdd from rubin_scheduler.data import get_baseline as gbd +from rubin_scheduler.data import get_data_dir as gdd def get_data_dir(): diff --git a/rubin_sim/maf/utils/generate_fov_map.py b/rubin_sim/maf/utils/generate_fov_map.py index 8c4ca8919..eabea3b11 100644 --- a/rubin_sim/maf/utils/generate_fov_map.py +++ b/rubin_sim/maf/utils/generate_fov_map.py @@ -1,5 +1,5 @@ import numpy as np -from rubin_scheduler.utils import gnomonic_project_toxy, gnomonic_project_tosky +from rubin_scheduler.utils import gnomonic_project_tosky, gnomonic_project_toxy # Use the main stack to make a rough array. 
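The local gnomonic projection helpers deleted from `generate_fov_map.py` in the hunk above are now imported from `rubin_scheduler.utils` instead. A minimal round-trip sketch of using the imported pair, assuming the `rubin_scheduler` versions keep the signatures of the deleted local copies (all angles in radians); the example field center and offset are arbitrary values:

```python
import numpy as np
from rubin_scheduler.utils import gnomonic_project_tosky, gnomonic_project_toxy

# Field center and a nearby point, all in radians (arbitrary example values).
ra_cen, dec_cen = np.radians(30.0), np.radians(-20.0)
ra, dec = np.radians(31.0), np.radians(-20.5)

# Project onto the tangent plane, then invert the projection;
# the round trip should recover the original coordinates.
x, y = gnomonic_project_toxy(ra, dec, ra_cen, dec_cen)
ra_back, dec_back = gnomonic_project_tosky(x, y, ra_cen, dec_cen)
assert np.allclose([ra_back, dec_back], [ra, dec])
```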
diff --git a/rubin_sim/skybrightness/generate_hdf5.py b/rubin_sim/skybrightness/generate_hdf5.py index a2b9dd5a2..efdb6c0e0 100644 --- a/rubin_sim/skybrightness/generate_hdf5.py +++ b/rubin_sim/skybrightness/generate_hdf5.py @@ -4,11 +4,11 @@ import h5py import healpy as hp import numpy as np +import rubin_scheduler.utils as utils from astropy.coordinates import AltAz, EarthLocation, get_sun from astropy.time import Time import rubin_sim.skybrightness as sb -import rubin_scheduler.utils as utils def generate_sky( diff --git a/tests/phot_utils/test_snr.py b/tests/phot_utils/test_snr.py index aa2cde532..2e7454fcf 100644 --- a/tests/phot_utils/test_snr.py +++ b/tests/phot_utils/test_snr.py @@ -5,8 +5,7 @@ from rubin_scheduler.data import get_data_dir import rubin_sim.phot_utils.signaltonoise as snr -from rubin_sim.phot_utils import Bandpass, PhotometricParameters, Sed -from rubin_sim.phot_utils import scale_sky_m5 +from rubin_sim.phot_utils import Bandpass, PhotometricParameters, Sed, scale_sky_m5 class TestSNRmethods(unittest.TestCase): From 40cabd307594a723cfa23a9d7dcabd3a9c91f64f Mon Sep 17 00:00:00 2001 From: Lynne Jones Date: Wed, 17 Jan 2024 16:10:53 -0800 Subject: [PATCH 13/26] Modify readmes --- README.md | 129 ++++++++++++-------------------------------------- README_dev.md | 14 +++--- all_req.txt | 29 ------------ 3 files changed, 38 insertions(+), 134 deletions(-) delete mode 100644 all_req.txt diff --git a/README.md b/README.md index dda020531..3158f49c0 100644 --- a/README.md +++ b/README.md @@ -12,102 +12,33 @@ Scheduler, survey strategy analysis, and other simulation tools for Rubin Observ [![DOI](https://zenodo.org/badge/365031715.svg)](https://zenodo.org/badge/latestdoi/365031715) -## Installation - -### Conda Installation ### - -If you are only running `rubin_sim` code and not making changes. If you will be editing the code or need the very latest verison, use the pip instructions below. -``` -conda create -n rubin-sim -c conda-forge rubin_sim ## Create a new environment and install rubin_sim -conda activate rubin-sim -rs_download_data ## Downloads a few of data to $RUBIN_SIM_DATA_DIR (~/rubin_sim_data if unset) -conda install -c conda-forge jupyter ## Optional install of jupyter -``` -Note that this is not the best option for developers working on their own metrics - a pip installation from their own fork of the repo may work better. - -### Pip installation ### - -``` -pip install rubin-sim -``` - -Please note that the pip installation of pyoorb does not come with the necessary data files. -To actually use pyoorb, the data files are most easily installable via conda with - ``` - conda install -c conda-forge openorb-data - conda install -c conda-forge openorb-data-de405 - ``` -The pip installation of `rubin_sim` will install the pip version of `pyoorb` which is -more up-to-date compared to the conda-forge version of `openorb`. For the purposes of -`rubin_sim`, the functionality is essentially the same however. - - -### Developer Installation ### - -To install `rubin_sim` from source using pip, with all dependencies (including jupyter): -``` -git clone https://github.com/lsst/rubin_sim.git ; cd rubin_sim ## clone and cd into repo -conda create -n rubin-sim --file=all_req.txt ## optional (but recommended) new conda env -conda activate rubin-sim ## substitute mamba for conda if you like -pip install -e . 
--no-deps -rs_download_data ## Downloads a few GB of data to $RUBIN_SIM_DATA_DIR (~/rubin_sim_data if unset) -``` -Note that external collaborators will likely want to follow similar directions, using a fork of our rubin_sim github repo first (and then clone from there). - -### Data download for rubin_sim ### - -**Optional: Set $RUBIN_SIM_DATA_DIR data directory.** By default, `rubin_sim` will download needed data files to `$HOME/rubin_sim_data`. If you would like the data to save elsewhere, you should set the `RUBIN_SIM_DATA_DIR` environment variable. In bash `export RUBIN_SIM_DATA_DIR="/my/preferred/data/path"` (note, always make sure this is set before trying to run `rubin_sim` packages, so put in your .bashrc or whatnot). Another possibility is to set the location via sym-link, `ln -s /my/preferred/data/path ~/rubin_sim_data`. - -``` -export RUBIN_SIM_DATA_DIR=$HOME/rubin_sim_data ## Optional. Set the data directory path via env variable -rs_download_data ## Downloads a few GB of data to $RUBIN_SIM_DATA_DIR -``` -If you are only interested in a subset of the data, you can specify which directories to download, e.g. -``` -rs_download_data --dirs "throughputs,skybrightness,tests,maps" -``` - -If you have a previous installation of rubin_sim or wish to update your data download, the flag `--force` will force an update of the data in the relevant $RUBIN_SIM_DATA_DIR directories. - - -**Example notebooks** to test and further explore rubin_sim, are available at [rubin_sim_notebooks](https://github.com/lsst/rubin_sim_notebooks). -``` -git clone https://github.com/lsst/rubin_sim_notebooks.git -cd rubin_sim_notebooks -# Example: make a plot of the number of visits per pointing -jupyter notebook maf/tutorial/Survey_footprint.ipynb -``` - - -### Downloading additional skybrightness_pre skybrightness files ### - -The default skybrightness_pre directory downloaded above contains only one month of pre-calculated skybrightness files. -If you wish to run the scheduler for a longer time period, or need this information outside of the span of that month period, -you will need to download a larger set of pre-computed sky data. - -To download the entire optional set all the (43 Gb) of pre-computed sky data. -``` -rs_download_sky -``` -Note that subsets of this data can get downloaded via http directly from -``` -https://s3df.slac.stanford.edu/data/rubin/sim-data/sims_skybrightness_pre/h5_2023_09_12/ -``` -(the file names reflect the range of MJD covered within each data file). - - -## Documentation - -Online documentation is available at https://rubin-sim.lsst.io -Example jupyter notebooks can be found at: https://github.com/lsst/rubin_sim_notebooks - -To create a local build of the documentation: -``` -conda install -c conda-forge lsst-documenteer-pipelines -cd doc -make html -``` - -## Getting Help ## - -Questions about `rubin_sim` can be posted on the [sims slack channel](https://lsstc.slack.com/archives/C2LQ5JW9W), or on https://community.lsst.org/ (tag @yoachim and/or @ljones so we get notifications about it). +## rubin_sim ## + +The [Legacy Survey of Space and Time](http://www.lsst.org) (LSST) +is anticipated to encompass around 2 million observations spanning a decade, +averaging 800 visits per night. The `rubin_sim` package was built to help +understand the predicted performance of the LSST. + +The `rubin_sim` package contains the following main modules: +* `phot_utils` - provides synthetic photometry +using provided throughput curves based on current predicted performance. 
+* `skybrightness` - incorporates the ESO
+sky model, modified to match measured sky conditions at the LSST site,
+including the addition of a model for twilight skybrightness. This is used
+to generate the pre-calculated skybrightness data for
+[`rubin_scheduler.skybrightness_pre`](https://rubin-scheduler.lsst.io/skybrightness-pre.html).
+* `moving_objects` - provides a way to generate
+synthetic observations of moving objects, based on how they would appear in
+pointing databases ("opsims") created by
+[`rubin_scheduler`](https://rubin-scheduler.lsst.io).
+* `maf` - the Metrics Analysis Framework, enabling efficient and
+scientifically varied evaluation of the LSST survey strategy and progress
+by providing a framework to run metrics in a
+standardized way on opsim outputs.
+
+More documentation for `rubin_sim` is available at
+[https://rubin-sim.lsst.io](https://rubin-sim.lsst.io), including installation instructions.
+
+### Getting Help ###
+
+Questions about `rubin_sim` can be posted on the [sims slack channel](https://lsstc.slack.com/archives/C2LQ5JW9W), or on https://community.lsst.org/c/sci/survey_strategy/ (optionally, tag @yoachim and/or @ljones so we get notifications about it).
diff --git a/README_dev.md b/README_dev.md
index e7eeb1d5f..6fef963e4 100644
--- a/README_dev.md
+++ b/README_dev.md
@@ -53,10 +53,12 @@ To update the source contents of the data files:

 Process for updating pre-computed files if system throughputs change.

-1) update rubin_sim_data/throughputs files
-2) update rubin_sim/rubin_sim/utils/sys_eng_vals.py
-3) recompute sky brightness files with rubin_sim.skybrightness.recalc_mags
-4) remake skybrightness_pre files with rubin_sim/rubin_sim/skybrightness_pre/data/generate_hdf5.py
-5) remake dark sky map with rubin_sim/rubin_sim/skybrightness_pre/data/generate_dark_sky.py
-6) tar and update files at SDF (throughputs, skybrightness, skybrightness_pre)
+0) update the throughputs in syseng_throughputs (this should be the original trigger to update throughputs anywhere downstream)
+1) update the throughputs in lsst/throughputs (including new tag)
+2) update rubin_sim_data/throughputs data files
+3) update rubin_scheduler.utils.sys_eng_vals.py - there is a notebook in syseng_throughputs which generates this file
+4) recompute sky brightness files with rubin_sim.skybrightness.recalc_mags
+5) remake skybrightness_pre files with rubin_sim/rubin_sim/skybrightness_pre/data/generate_hdf5.py
+6) remake dark sky map with rubin_sim/rubin_sim/skybrightness_pre/data/generate_dark_sky.py
+7) tar and update files at SDF (throughputs, skybrightness, skybrightness_pre)
diff --git a/all_req.txt b/all_req.txt
deleted file mode 100644
index 77a7c1631..000000000
--- a/all_req.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-setuptools_scm
-setuptools_scm_git_archive
-numpy
-matplotlib
-healpy
-pandas
-numexpr
-scipy
-sqlalchemy
-astropy
-pytables
-h5py
-openorb
-openorb-data-de405
-astroplan
-colorcet
-cycler
-george
-scikit-learn
-requests
-shapely
-skyfield
-tqdm
-pytest
-pytest-cov
-pytest-black
-black
-ruff
-rubin-scheduler

From 70188c4e2dde559a15a88c4c7977a9e6f116b520 Mon Sep 17 00:00:00 2001
From: Lynne Jones
Date: Thu, 1 Feb 2024 00:47:40 -0800
Subject: [PATCH 14/26] Ruff - docstrings

---
 rubin_sim/maf/generate_ss.py                  |  11 +-
 rubin_sim/maf/maf_contrib/__init__.py         |   1 +
 .../cadence_over_visibility_window_metric.py  | 243 ------------------
 .../maf/maf_contrib/calc_expected_visits.py   | 100 -------
 ...calculate_lsst_field_visibility_astropy.py | 212 +++++++--
.../depth_limited_num_gal_metric.py | 49 ++-- .../maf/maf_contrib/example_new_metrics.py | 9 +- .../maf_contrib/filter_pair_t_gaps_metric.py | 46 ++-- .../maf/maf_contrib/grb_transient_metric.py | 66 +++-- .../maf/maf_contrib/gw170817_det_metric.py | 16 +- .../intervals_between_obs_metric.py | 21 +- rubin_sim/maf/maf_contrib/kne_metrics.py | 8 +- rubin_sim/maf/maf_contrib/lss_metrics.py | 18 +- .../num_obs_in_survey_time_overlap_metric.py | 9 +- rubin_sim/maf/maf_contrib/periodic_metric.py | 16 +- .../maf/maf_contrib/periodic_star_metric.py | 42 ++- .../maf/maf_contrib/star_count_mass_metric.py | 51 +++- .../maf/maf_contrib/star_count_metric.py | 44 +++- .../maf/maf_contrib/star_counts/abs_mag.py | 3 +- .../maf/maf_contrib/star_counts/coords.py | 8 +- .../maf/maf_contrib/star_counts/spec_type.py | 3 +- .../maf/maf_contrib/star_counts/starcount.py | 3 +- .../star_counts/starcount_bymass.py | 7 +- .../maf_contrib/star_counts/stellardensity.py | 3 +- rubin_sim/maf/maf_contrib/tdes_pop_metric.py | 74 ++++-- .../maf_contrib/transient_ascii_sed_metric.py | 163 ++++++------ rubin_sim/maf/maf_contrib/triplet_metric.py | 46 ++-- 27 files changed, 552 insertions(+), 720 deletions(-) delete mode 100644 rubin_sim/maf/maf_contrib/cadence_over_visibility_window_metric.py delete mode 100644 rubin_sim/maf/maf_contrib/calc_expected_visits.py diff --git a/rubin_sim/maf/generate_ss.py b/rubin_sim/maf/generate_ss.py index b9e7cfa39..5adeda349 100755 --- a/rubin_sim/maf/generate_ss.py +++ b/rubin_sim/maf/generate_ss.py @@ -95,7 +95,8 @@ def generate_ss_commands( os.mkdir(out_dir) except FileExistsError: pass - # Create the results DB so multiple threads don't try to create it later + # Create the results DB so multiple threads + # don't try to create it later # This isn't used in this script, but needs to exist on-disk. 
results_db = db.ResultsDb(out_dir=out_dir) for pop in pops: @@ -126,8 +127,9 @@ def generate_ss_commands( os.mkdir(out_dir) except FileExistsError: pass - # Create the results DB so multiple threads don't try to create it later - results_db = db.ResultsDb(out_dir=out_dir) + # Create the results DB so multiple threads + # don't try to create it later + results_db = db.ResultsDb(out_dir=out_dir) # noqa F841 outfile = f"{run}_ss_script.sh" if split: output_file = open(outfile, "w") @@ -136,7 +138,8 @@ def generate_ss_commands( if split: splitfiles = glob.glob(os.path.join(data_dir, "split") + f"/*{pop}*") outfile_split = outfile.replace(".sh", f"_{pop}_split.sh") - # If the output split file already exists, remove it (as we append, not write) + # If the output split file already exists, remove it + # (as we append, not write) if os.path.isfile(outfile_split): os.remove(outfile_split) for i, splitfile in enumerate(splitfiles): diff --git a/rubin_sim/maf/maf_contrib/__init__.py b/rubin_sim/maf/maf_contrib/__init__.py index d1dcc956f..2c0b18396 100644 --- a/rubin_sim/maf/maf_contrib/__init__.py +++ b/rubin_sim/maf/maf_contrib/__init__.py @@ -22,3 +22,4 @@ from .var_metrics import * from .xrb_metrics import * from .young_stellar_objects_metric import * +from .calculate_lsst_field_visibility_astropy import * diff --git a/rubin_sim/maf/maf_contrib/cadence_over_visibility_window_metric.py b/rubin_sim/maf/maf_contrib/cadence_over_visibility_window_metric.py deleted file mode 100644 index 2775e1b56..000000000 --- a/rubin_sim/maf/maf_contrib/cadence_over_visibility_window_metric.py +++ /dev/null @@ -1,243 +0,0 @@ -from sys import argv - -# from astropy.visualization import astropy_mpl_style -# plt.style.use(astropy_mpl_style) -import numpy as np -from astropy.time import Time, TimeDelta - -import rubin_sim.maf.db as db -import rubin_sim.maf.metricBundles as metricBundles -import rubin_sim.maf.slicers as slicers -from rubin_sim.maf.metrics import BaseMetric - -from .calc_expected_visits import CalcExpectedVisitsMetric - - -class CadenceOverVisibilityWindowMetric(BaseMetric): - """Metric to compare the lightcurve cadence produced by LSST over the visibility window - for a given position in the sky to the desired cadence. - - This metric determines the number of - visits to a given field (RA,Dec) performed, including all exposures taken - with the given set of filters. - - It compares the actual number of visits with the maximum possible visits, - calculated from the visibility window of the field for the given start and - end dates, and desired cadence. - - The returned result = ([sum_j (n_visits_actual / n_visits_desired)]/N_filters ) * 100 (%) - - For cadences less than 1 day, this is the sum over all anticipated visits - per night. For cadences greater than 1 day, this is calculated as a fraction - of the anticipated number of visits during batches of nights. - """ - - def __init__( - self, - filters, - cadence, - start_date, - end_date, - metric_name="CadenceOverVisibilityWindowMetric", - ra_col="fieldRA", - dec_col="fieldDec", - exp_col="visitExposureTime", - n_exp_col="numExposures", - filter_col="filter", - obstime_col="observationStartMJD", - visittime_col="visitTime", - verbose=False, - **kwargs, - ): - """Arguments: - filters list Filterset over which to compute the metric - cadence list Cadence desired for each filter in units of decimal hours - e.g. 
[ 0.5, 1.0, 1.2 ] - start_date string Start of observing window YYYY-MM-DD - end_date string End of observing window YYYY-MM-DD - """ - - self.filters = filters - self.cadence = cadence - self.start_date = start_date - self.end_date = end_date - self.ra_col = ra_col - self.dec_col = dec_col - self.exp_col = exp_col - self.n_exp_col = n_exp_col - self.obstime_col = obstime_col - self.visittime_col = visittime_col - self.filter_col = filter_col - self.verbose = verbose - - if len(self.filters) != len(self.cadence): - raise ValueError( - "ERROR: The list of filters requested must correspond to the list of required cadences" - ) - exit() - - columns = [ - self.ra_col, - self.dec_col, - self.exp_col, - self.n_exp_col, - self.obstime_col, - self.visittime_col, - self.filter_col, - ] - - super(CadenceOverVisibilityWindowMetric, self).__init__(col=columns, metric_name=metric_name) - - def run(self, data_slice, slice_point=None): - t = np.empty(data_slice.size, dtype=list(zip(["time", "filter"], [float, "|S1"]))) - t["time"] = data_slice[self.obstime_col] - - t_start = Time(self.start_date + " 00:00:00") - t_end = Time(self.end_date + " 00:00:00") - n_days = int((t_end - t_start).value) - dates = np.array([t_start + TimeDelta(i, format="jd", scale=None) for i in range(0, n_days, 1)]) - - result = 0.0 - - for i, f in enumerate(self.filters): - if self.verbose: - print( - "Calculating the expected visits in filter " - + f - + " given required cadence " - + str(self.cadence[i]) - ) - - # Returns a list of the number of visits per night for each pointing - pointing = [(data_slice[self.ra_col][0], data_slice[self.dec_col][0])] - - visit = CalcExpectedVisitsMetric( - pointing, - self.cadence[i], - self.start_date, - self.end_date, - self.filters[i], - self.ra_col, - self.dec_col, - verbose=self.verbose, - ) - - (n_visits_desired, hrs_visibility) = visit.run(data_slice) - - n_visits_actual = [] - - for j, d in enumerate(dates): - idx = np.where(data_slice[self.filter_col] == f) - - actual_visits_per_filter = data_slice[idx] - - tdx = np.where( - actual_visits_per_filter[self.obstime_col].astype(int) == int(d.jd - 2400000.5) - ) - - n_visits_actual.append(float(len(actual_visits_per_filter[tdx]))) - - # Case 1: Required cadence is less than 1 day, meaning we - # anticipate more than 1 observation per night - if self.cadence[i] <= 24.0: - for j, d in enumerate(dates): - if n_visits_desired[0][j] > 0: - night_efficiency = n_visits_actual[j] / float(n_visits_desired[0][j]) - - result += night_efficiency - - result = result / float(len(dates)) - - # Case 2: Required cadence is greater than 1 day, meaning we - # expect at least 1 observation within batches of nights - # self.cadence[i] long - else: - n_nights = int(self.cadence[i] / 24.0) - - for j in range(0, len(dates), n_nights): - hrs_available = (np.array(hrs_visibility[0][j : j + n_nights])).sum() - - n_actual = (np.array(n_visits_actual[j : j + n_nights])).sum() - - if hrs_available >= 1.0 and n_actual > 1: - result += 1.0 - - result = result / float(len(dates) / n_nights) - - result = (result / float(len(self.filters))) * 100.0 - - if self.verbose: - print("METRIC RESULT: Observing cadence percentage = " + str(result)) - - return result - - -def compute_metric(params): - """Function to execute the metric calculation when code is called from - the commandline""" - - obsdb = db.OpsimDatabase("/home/docmaf/my_repoes/data/baseline2018a.db") - output_dir = "/home/docmaf/" - results_db = db.ResultsDb(out_dir=output_dir) - - (propids, proptags) = 
obsdb.fetchPropInfo() - survey_where = obsdb.createSQLWhere(params["survey"], proptags) - - obs_params = {"verbose": params["verbose"]} - - metric = CadenceOverVisibilityWindowMetric( - params["filters"], params["cadence"], params["start_date"], params["end_date"], **obs_params - ) - - slicer = slicers.HealpixSlicer(nside=64) - sqlconstraint = survey_where - bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint) - - bgroup = metricBundles.MetricBundleGroup( - {0: bundle}, obsdb, outDir="newmetric_test", results_db=results_db - ) - bgroup.run_all() - - -if __name__ == "__main__": - if len(argv) == 1: - print("Metric requires the following commandline sequence, e.g.:") - print( - "> python CadenceOverVisibilityWindowMetric.py filters=g,r,i,z cadence=168.0,168.0,1.0,168.0 start_date=2020-01-02 end_date=2020-04-02 survey=option" - ) - print(" where:") - print(" filters may be specified as a comma-separated list without spaces") - print( - " cadence is the cadence corresponding to each filter in hours, in a comma-separated list without spaces" - ) - print(" start_date, end_date are the UTC dates of the start and end of the observing window") - print(" survey indicates which survey to select data from. Options are {WFD, DD, NES}") - - else: - params = {"verbose": False} - - for arg in argv: - try: - (key, value) = arg.split("=") - - if key == "filters": - params[key] = value.split(",") - - if key == "cadence": - cadence_list = [] - - for val in value.split(","): - cadence_list.append(float(val)) - - params[key] = cadence_list - - if key in ["start_date", "end_date", "survey"]: - params[key] = value - - except ValueError: - pass - - if "verbose" in arg: - params["verbose"] = True - - compute_metric(params) diff --git a/rubin_sim/maf/maf_contrib/calc_expected_visits.py b/rubin_sim/maf/maf_contrib/calc_expected_visits.py deleted file mode 100644 index f4257a59a..000000000 --- a/rubin_sim/maf/maf_contrib/calc_expected_visits.py +++ /dev/null @@ -1,100 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Created on Tue Sep 25 17:11:20 2018 - -@author: rstreet -""" - -__all__ = ("CalcExpectedVisitsMetric",) - -import numpy as np - -from rubin_sim.maf.metrics import BaseMetric - -from .calculate_lsst_field_visibility_astropy import calculate_lsst_field_visibility - - -class CalcExpectedVisitsMetric(BaseMetric): - """Function to calculate the maximum possible number of visits to a - given pointing, given the expected cadence of observation and within - the date ranges given, taking target visibility into account. 
- - Input: - :param array ra: RAs, J2000.0, sexigesimal format - :param array dec: Decs, J2000.0, sexigesimal format - :param float cadence: Interval between successive visits in the - same single filter in hours - :param string start_date: Start of observing window YYYY-MM-DD - :param string start_date: End of observation window YYYY-MM-DD - - Output: - :param list of arrays n_visits: Number of visits possible per night - for each pointing - :param list of arrays hrs_visibility: Hours of visibility per night - for each pointing - """ - - def __init__( - self, - pointings, - cadence, - start_date, - end_date, - filter_id, - ra_col="fieldRA", - dec_col="fieldDec", - metric_name="CalcExpectedVisitsMetric", - verbose=False, - ): - """Input: - :param array ra: RAs, J2000.0, sexigesimal format - :param array dec: Decs, J2000.0, sexigesimal format - :param float cadence: Interval between successive visits in the - same single filter in hours - :param string start_date: Start of observing window YYYY-MM-DD - :param string start_date: End of observation window YYYY-MM-DD - - Output: - :param list of arrays n_visits: Number of visits possible per night - for each pointing - :param list of arrays hrs_visibility: Hours of visibility per night - for each pointing - """ - - self.pointings = pointings - self.cadence = cadence - self.start_date = start_date - self.end_date = end_date - self.filter_id = filter_id - self.ra_col = ra_col - self.dec_col = dec_col - self.verbose = verbose - - columns = [self.ra_col, self.dec_col] - - super(CalcExpectedVisitsMetric, self).__init__(col=columns, metric_name=metric_name) - - def run(self, data_slice, slice_point=None): - n_visits = [] - hrs_visibility = [] - - if self.verbose: - print("Calculating visbility for " + str(len(self.pointings)) + " fields") - - for i in range(0, len(self.pointings), 1): - # (ra, dec) = pointings[i] - ra = data_slice[self.ra_col][0] - dec = data_slice[self.dec_col][0] - - if self.verbose: - print(" -> RA " + str(ra) + ", Dec " + str(dec)) - - ( - total_time_visible, - hrs_visible_per_night, - ) = calculate_lsst_field_visibility(ra, dec, self.start_date, self.end_date, verbose=False) - - n_visits.append((np.array(hrs_visible_per_night) / self.cadence).astype(int)) - hrs_visibility.append(np.array(hrs_visible_per_night)) - - return n_visits, hrs_visibility diff --git a/rubin_sim/maf/maf_contrib/calculate_lsst_field_visibility_astropy.py b/rubin_sim/maf/maf_contrib/calculate_lsst_field_visibility_astropy.py index 507f8c60a..dcd9404f4 100644 --- a/rubin_sim/maf/maf_contrib/calculate_lsst_field_visibility_astropy.py +++ b/rubin_sim/maf/maf_contrib/calculate_lsst_field_visibility_astropy.py @@ -1,77 +1,68 @@ -# -*- coding: utf-8 -*- """ Created on Tue Sep 18 13:35:41 2018 @author: rstreet """ -__all__ = ("calculate_lsst_field_visibility", "plot_visibility") +__all__ = ("calculate_lsst_field_visibility", "calculate_lsst_field_visibility_fast") -import copy -from sys import argv -# from astropy.visualization import astropy_mpl_style -# plt.style.use(astropy_mpl_style) import astropy.units as u -import matplotlib.pylab as plt import numpy as np from astropy.coordinates import AltAz, EarthLocation, SkyCoord, get_sun from astropy.time import Time, TimeDelta +from rubin_scheduler.utils import Site +from rubin_scheduler.utils import approx_ra_dec2_alt_az + def calculate_lsst_field_visibility( - field_ra, - field_dec, - start_date, - end_date, - min_alt=30.0, - dt_days=1.0, - diagnostics=False, - verbose=False, + ra, dec, start_date, end_date, 
min_alt=30.0, sun_alt_limit=18.0, sample_rate=0.0007, verbose=False ): """Method to calculate the visibility of a given RA and Dec from LSST over the course of a year Adapted from an example in the Astropy docs. - Inputs: - :param float field_ra: Field RA in decimal degrees - :param float field_dec: Field Dec in decimal degrees - :param str start_date: Start date for calculations, UTC - :param str end_date: End date for calculations, UTC + Parameters + ---------- + ra : `float` + RA in decimal degrees. + dec : `float + Declination in decimal degrees + start_date : `astropy.time.Time` + Start date for calculations + end_date : `astropy.time.Time` + End date for calculations + min_alt : `float`, optional + Minimal altitude for field + sun_alt_limit : `float`, optional + Maximum sun altitude to consider for visibility + sample_rate : `float`, optional + Time spacing between visibility tests (days) + verbose : `bool`, optional + Output extra information, including debugging """ + field = SkyCoord(ra, dec, frame="icrs", unit=(u.deg, u.deg)) - field = SkyCoord(field_ra, field_dec, frame="icrs", unit=(u.hourangle, u.deg)) - + lsst_site = Site("LSST") lsst = EarthLocation( - lat=-30.239933333333333 * u.deg, - lon=-70.7429638888889 * u.deg, - height=2663.0 * u.m, + lat=lsst_site.latitude * u.deg, + lon=lsst_site.longitude * u.deg, + height=lsst_site.height * u.m, ) total_time_visible = 0.0 - t_start = Time(start_date + " 00:00:00") - t_end = Time(end_date + " 00:00:00") - cadence = 0.0007 # In days - n_days = int((t_end - t_start).value) - - dates = np.array([t_start + TimeDelta(i, format="jd", scale=None) for i in range(0, n_days, 1)]) + dates = np.arange(start_date, end_date, TimeDelta(1, format="jd", scale="tai")) target_alts = [] hrs_visible_per_night = [] hrs_per_night = [] - jds = [] for d in dates: - jds.append(d.jd) - - t = copy.copy(d) - t.out_subfmt = "date" - tstr = t.value + intervals = np.arange(0.0, 1.0, sample_rate) - intervals = np.arange(0.0, 1.0, cadence) - - dt = TimeDelta(intervals, format="jd", scale=None) + dt = TimeDelta(intervals, format="jd", scale="tai") ts = d + dt @@ -81,109 +72,100 @@ def calculate_lsst_field_visibility( alts = np.array((altaz.alt * u.deg).value) - idx = np.where(alts > min_alt)[0] + idx = np.where(alts >= min_alt)[0] sun_altaz = get_sun(ts).transform_to(frame) sun_alts = np.array((sun_altaz.alt * u.deg).value) - jdx = np.where(sun_alts < 12.0)[0] - - hrs_per_night.append(cadence * len(sun_alts[jdx]) * 24.0) - + jdx = np.where(sun_alts < sun_alt_limit)[0] + # Hours available in the sun + hrs_per_night.append(sample_rate * len(sun_alts[jdx]) * 24.0) + # The indexes where sun down and target above min_alt idx = list(set(idx).intersection(set(jdx))) - + # The highest altitude for the target, when the sun is down target_alts.append(alts[jdx].max()) if len(idx) > 0: ts_vis = ts[idx] - tvis = cadence * len(ts_vis) + tvis = sample_rate * len(ts_vis) total_time_visible += tvis - # target_alts.append(alts[idx].max()) - if verbose: - print("Target visible from LSST for " + str(round(tvis * 24.0, 2)) + "hrs on " + tstr) - + print("Target visible from LSST for " + str(round(tvis * 24.0, 2)) + "hrs on " + d.isot) + # Hours of visibility of the target on this night hrs_visible_per_night.append((tvis * 24.0)) else: # target_alts.append(-1e5) - hrs_visible_per_night.append(0.0) if verbose: - print("Target not visible from LSST on " + tstr) - - if diagnostics: - plot_visibility(jds, target_alts, sun_alts, hrs_visible_per_night, min_alt) - - return total_time_visible, 
hrs_visible_per_night - + print("Target not visible from LSST on " + d.isot) -def plot_visibility(ts, target_alts, sun_alts, hrs_visible_per_night, min_alt): - """Function to plot a chart of the target and solar altitude above the - horizon at the LSST site as a function of time""" + return np.array(total_time_visible), np.array(hrs_visible_per_night) - ts = np.array(ts) - target_alts = np.array(target_alts) - (fig, ax1) = plt.subplots(figsize=(10, 10)) - - plt.rcParams.update({"font.size": 18}) - plt.rc("xtick", labelsize=18) - plt.rc("ytick", labelsize=18) - plt.xticks(rotation=45.0) - - idx = np.where(target_alts > -1e5) - ax1.plot((ts - 2450000)[idx], target_alts[idx], "b-", label="Target altitude") - ax1.set_xlabel("JD") - ax1.set_ylabel(r"Maximum altitude [$^{\circ}$]", color="b") - ax1.xaxis.label.set_fontsize(18) - ax1.yaxis.label.set_fontsize(18) - for label in ax1.get_xticklabels(): - label.set_fontsize(18) - for label in ax1.get_yticklabels(): - label.set_fontsize(18) - - t = [(ts - 2450000).min(), (ts - 2450000).max()] - ax1.plot(t, [min_alt] * len(t), "g-.") - - ax1.grid(True) - ax1.tick_params("y", colors="b") - - ax2 = ax1.twinx() - ax2.plot(ts - 2450000, hrs_visible_per_night, "m--", label="Time target visible") - ax2.set_ylabel("Hours per night", color="m") - - ax2.yaxis.label.set_fontsize(18) - ax2.grid(False) - ax2.tick_params("y", colors="m") - - fig.tight_layout() - - plt.legend() +def calculate_lsst_field_visibility_fast( + ra, + dec, + start_date, + end_date, + min_alt=30.0, + sun_alt_limit=18.0, + sample_rate=0.0007, +): + """Method to calculate the visibility of a given RA and Dec from LSST + over the course of a year - plt.savefig("target_visibility_from_lsst.png") + Skips astropy calculation of alt/az which is slow and uses + approximate transform from rubin_scheduler. + + Parameters + ---------- + ra : `float` + RA in decimal degrees. + dec : `float + Declination in decimal degrees + start_date : `astropy.time.Time` + Start date for calculations + end_date : `astropy.time.Time` + End date for calculations + min_alt : `float`, optional + Minimal altitude for field + sun_alt_limit : `float`, optional + Maximum sun altitude to consider for visibility + cadence : `float`, optional + Time spacing between visibility tests (days) + + Returns + ------- + tvisible : `float` + Total time target is visible (days) + dates_visible : `np.ndarray`, (N,) + Dates that target is above min_alt and sun is below sun_alt_limit, + within start_date to end_date. 
+ """ + lsst_site = Site("LSST") - plt.close() + dates = np.arange(start_date.mjd, end_date.mjd + sample_rate / 2.0, sample_rate) + alts, _ = approx_ra_dec2_alt_az(ra, dec, lsst_site.latitude, lsst_site.longitude, dates) + # where is the target above the minimum altitude + target_high = np.where(alts >= min_alt)[0] -if __name__ == "__main__": - if len(argv) > 1: - field_ra = argv[1] - field_dec = argv[2] - start_date = argv[3] - end_date = argv[4] + # when is the sun above the sun_alt_limit + dates = dates[target_high] + sun_locs = get_sun(Time(dates, format="mjd", scale="utc")) + sun_alts, _ = approx_ra_dec2_alt_az( + sun_locs.ra.deg, sun_locs.dec.deg, lsst_site.latitude, lsst_site.longitude, dates + ) + sun_low = np.where(sun_alts <= sun_alt_limit)[0] - else: - field_ra = input("Please enter the RA in sexigesimal format, J2000.0: ") - field_dec = input("Please enter the Dec in sexigesimal format, J2000.0: ") - start_date = input("Please enter the start date of the observing window, YYYY-MM-DD: ") - end_date = input("Please enter the end date of the observing window, YYYY-MM-DD: ") + dates = dates[sun_low] + # total amount of time target is visible, in days + tvisible = len(dates) * sample_rate - (total_time_visible, hrs_per_night) = calculate_lsst_field_visibility( - field_ra, field_dec, start_date, end_date, diagnostics=True - ) + return tvisible, dates diff --git a/rubin_sim/maf/maf_contrib/depth_limited_num_gal_metric.py b/rubin_sim/maf/maf_contrib/depth_limited_num_gal_metric.py index ca3ae032f..eef733af2 100644 --- a/rubin_sim/maf/maf_contrib/depth_limited_num_gal_metric.py +++ b/rubin_sim/maf/maf_contrib/depth_limited_num_gal_metric.py @@ -7,36 +7,42 @@ class DepthLimitedNumGalMetric(metrics.BaseMetric): - """ - This metric calculates the number of galaxies while accounting for the extragalactic footprint. + """This metric calculates the number of galaxies while accounting for the + extragalactic footprint. Parameters ---------- - m5_col: str, optional + m5_col : `str`, optional Name of column for depth in the data. Default: 'fiveSigmaDepth' - filter_col: str, optional + filter_col : `str`, optional Name of column for filter in the data. Default: 'filter' - maps: list, optional + maps : `list` [`str`], optional List of map names. Default: ['DustMap'] - nside: int, optional - HEALpix resolution parameter. Default: 256. This should match slicer nside. - filter_band: str, optional - Filter to use to calculate galaxy counts. Any one of 'u', 'g', 'r', 'i', 'z', 'y'. Default: 'i' - redshiftBin: str, optional + nside : `int`, optional + HEALpix resolution parameter. Default: 256. + This should match slicer nside. + filter_band : `str`, optional + Filter to use to calculate galaxy counts. + Any one of 'u', 'g', 'r', 'i', 'z', 'y'. Default: 'i' + redshiftBin: `str`, optional options include '0. for extended source limiting mag of x, we'd need x + 0.7 as the point-source limiting mag; + # galaxies are x2 as seeing: seeing is generally 0.7arcsec + # and a typical galaxies is 1arcsec + # => for extended source limiting mag of x, we'd need x + 0.7 + # as the point-source limiting mag; # 0.7 comes from $\sqrt{1/2}$; - # basically have x2 difference in magnitudes between point source and extended source. + # basically have x2 difference in magnitudes between + # point source and extended source. 
lim_mag_i_extsrc = lim_mag_i_ptsrc - 0.7 # set up the metric for galaxy counts self.galmetric = GalaxyCountsMetric( diff --git a/rubin_sim/maf/maf_contrib/example_new_metrics.py b/rubin_sim/maf/maf_contrib/example_new_metrics.py index c39c60b3c..81df40307 100644 --- a/rubin_sim/maf/maf_contrib/example_new_metrics.py +++ b/rubin_sim/maf/maf_contrib/example_new_metrics.py @@ -9,7 +9,14 @@ class NightsWithNFiltersMetric(BaseMetric): - """Count how many times more than NFilters are used within the same night, for this set of visits.""" + """Count how many times more than NFilters are used within the same night, + for this set of visits. + + Parameters + ---------- + n_filters : `int`, optional + How many filters to look for, within the same night. + """ def __init__(self, night_col="night", filter_col="filter", n_filters=3, **kwargs): """ diff --git a/rubin_sim/maf/maf_contrib/filter_pair_t_gaps_metric.py b/rubin_sim/maf/maf_contrib/filter_pair_t_gaps_metric.py index f9a3a929a..ac0e9df0b 100644 --- a/rubin_sim/maf/maf_contrib/filter_pair_t_gaps_metric.py +++ b/rubin_sim/maf/maf_contrib/filter_pair_t_gaps_metric.py @@ -6,26 +6,31 @@ class FilterPairTGapsMetric(BaseMetric): - """ - figure of merit to measure the coverage the time gaps in same and different filter pairs; + """Figure of merit to measure the coverage of the time gaps in same + and different filter pairs; + FoM is defined as sum of Nv / standard deviation after a clip; - Parameters: - colname: list, ['observationStartMJD', 'filter', 'fiveSigmaDepth'] - fltpairs: filter pair, default ['uu', 'ug', 'ur', 'ui', 'uz','uy', - 'gg', 'gr', 'gi', 'gz', 'gy', - 'rr', 'ri', 'rz', 'ry', - 'ii', 'iz', 'iy', - 'zz', 'zy', - 'yy'] - mag_lim: list, fiveSigmaDepth threshold each filter, default {'u':18, 'g':18, 'r':18, 'i':18, 'z':18, 'y':18} - bins_same: np.array, bins to get histogram for same-filter pair ; - bins_diff: np.array, bins to get histogram for diff-filter pair ; - nv_clip: number of visits of pairs to clip, std is calculated below nv_clip - allgaps: boolean, all possible pairs if True, else consider only nearest - - Returns: - result: sum of fom for all filterpairs, + Parameters + ---------- + fltpairs : `list` [`str`], optional + List of filter pair sets to search for. + mag_lim : `dict`, optional + FiveSigmaDepth threshold in each filter, + default {'u':18, 'g':18, 'r':18, 'i':18, 'z':18, 'y':18} + bins_same : `np.ndarray`, (N,) + Bins to get histogram for same-filter pair. + bins_diff : `np.ndarray`, (N,) + Bins to get histogram for diff-filter pair. + nv_clip : `int`, optional + Number of visits of pairs to clip, std is calculated below nv_clip.
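A minimal standalone sketch (not the metric's code) of the cross-filter gap histogram this FoM is built on; the visit times and bins below are arbitrary:

import numpy as np

t_g = np.array([0.0, 0.5, 1.0, 2.0])  # MJDs of g-band visits
t_r = np.array([0.2, 0.9, 1.6])  # MJDs of r-band visits
d_t = np.abs(t_r[:, None] - t_g[None, :]).ravel()  # all g-r time gaps (days)
bins_diff = np.arange(0.0, 2.5, 0.5)
nv, _ = np.histogram(d_t, bins=bins_diff)  # visit-pair counts per gap bin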
+ allgaps : `bool`, optional + All possible pairs if True, else consider only nearest + + Returns + ------- + result : `float` + Sum of the FoM over all filter pairs. """ def __init__( @@ -95,7 +100,7 @@ def __init__( self.allgaps = allgaps - # number of visits to clip, default got from 1/10th of baseline_v2.0 WFD + # number of visits to clip, default from 1/10th of baseline_v2.0 WFD self.nv_clip = nv_clip super().__init__(col=[self.mjd_col, self.filter_col, self.m5_col], **kwargs) @@ -127,7 +132,8 @@ def _get_d_t(self, data_slice, f0, f1): d_t = dt_tri[dt_tri != 0] # flatten lower triangle else: # d_t = diffmat.flatten() - dtmax = np.max(self.bins_diff) # time gaps window for measure color + dtmax = np.max(self.bins_diff) + # time gap window for measuring color d_t = [] for time_col in time_col0: time_col_in_window = time_col1[ diff --git a/rubin_sim/maf/maf_contrib/grb_transient_metric.py b/rubin_sim/maf/maf_contrib/grb_transient_metric.py index e7a1f77fe..564858aee 100644 --- a/rubin_sim/maf/maf_contrib/grb_transient_metric.py +++ b/rubin_sim/maf/maf_contrib/grb_transient_metric.py @@ -11,60 +11,62 @@ class GRBTransientMetric(metrics.BaseMetric): """Detections for on-axis GRB afterglows decaying as - F(t) = F(1min)((t-t0)/1min)^-alpha. No jet break, for now. + F(t) = F(1min)((t-t0)/1min)^-alpha. No jet break, for now. - Derived from TransientMetric, but calculated with reduce functions to + Derived from TransientMetric, but calculated with reduce functions to enable band-specific counts. - Burst parameters taken from 2011PASP..123.1034J. + Burst parameters taken from 2011PASP..123.1034J. - Simplifications: - no color variation or evolution encoded yet. - no jet breaks. - not treating off-axis events. + Simplifications: + * no color variation or evolution encoded yet. + * no jet breaks. + * not treating off-axis events. Parameters ---------- - alpha : float, + alpha : `float` temporal decay index Default = 1.0 - apparent_mag_1min_mean : float, + apparent_mag_1min_mean : `float` mean magnitude at 1 minute after burst Default = 15.35 - apparent_mag_1min_sigma : float, + apparent_mag_1min_sigma : `float` std of magnitudes at 1 minute after burst Default = 1.59 - trans_duration : float, optional + trans_duration : `float`, optional How long the transient lasts (days). Default 10. - survey_duration : float, optional + survey_duration : `float`, optional Length of survey (years). Default 10. - survey_start : float, optional + survey_start : `float`, optional MJD for the survey start date. Default None (uses the time of the first observation). - detect_m5_plus : float, optional - An observation will be used if the light curve magnitude is brighter than m5+detect_m5_plus. + detect_m5_plus : `float`, optional + An observation will be used if the light curve magnitude is brighter + than m5+detect_m5_plus. Default 0. - n_per_filter : int, optional + n_per_filter : `int`, optional Number of separate detections of the light curve above the detect_m5_plus threshold (in a single filter) for the light curve to be counted. Default 1. - n_filters : int, optional + n_filters : `int`, optional Number of filters that need to be observed n_per_filter times, with differences min_delta_mag, for an object to be counted as detected. Default 1. - min_delta_mag : float, optional + min_delta_mag : `float`, optional magnitude difference between detections in the same filter required for second+ detection to be counted.
For example, if min_delta_mag = 0.1 mag and two consecutive observations differ only by 0.05 mag, those two detections will only count as one. (Better would be a SNR-based discrimination of lightcurve change.) Default 0. - n_phase_check : int, optional + n_phase_check : `int`, optional Sets the number of phases that should be checked. - One can imagine pathological cadences where many objects pass the detection criteria, - but would not if the observations were offset by a phase-shift. + One can imagine pathological cadences where many objects pass the + detection criteria, but would not if the observations were offset + by a phase-shift. Default 1. """ @@ -139,27 +141,17 @@ def light_curve(self, time, filters): return lc_mags def run(self, data_slice, slice_point=None): - """ " - Calculate the detectability of a transient with the specified lightcurve. - - Parameters - ---------- - data_slice : numpy.array - Numpy structured array containing the data related to the visits provided by the slicer. - slice_point : dict, optional - Dictionary containing information about the slice_point currently active in the slicer. - - Returns - ------- - float - The total number of transients that could be detected. + """ + Calculate the detectability of a transient with the + specified lightcurve. """ # Total number of transients that could go off back-to-back n_trans_max = np.floor(self.survey_duration / (self.trans_duration / 365.25)) tshifts = np.arange(self.n_phase_check) * self.trans_duration / float(self.n_phase_check) n_trans_max = 0 for tshift in tshifts: - # Compute the total number of back-to-back transients are possible to detect + # Compute the total number of back-to-back transients + # that are possible to detect # given the survey duration and the transient duration. n_trans_max += np.floor(self.survey_duration / (self.trans_duration / 365.25)) if tshift != 0: @@ -212,7 +204,7 @@ def run(self, data_slice, slice_point=None): lc_points = lc_mags[le:ri][wdetfilt] dlc = np.abs(np.diff(lc_points)) - # number of detections in band, requring that for + # number of detections in band, requiring that for # nPerFilter > 1 that points have more than minDeltaMag # change nbanddet = np.sum(dlc > self.min_delta_mag) + 1 diff --git a/rubin_sim/maf/maf_contrib/gw170817_det_metric.py b/rubin_sim/maf/maf_contrib/gw170817_det_metric.py index 147b6d5f3..2201d6c02 100644 --- a/rubin_sim/maf/maf_contrib/gw170817_det_metric.py +++ b/rubin_sim/maf/maf_contrib/gw170817_det_metric.py @@ -1,6 +1,6 @@ # Metric for kilonova detectability based on GW170817 SED used in Scolnic et -# al. 2018 and Setzer et al. 2019. The chosen detection criteria are related to -# those used in the LSST DESC white paper detectability work and the two +# al. 2018 and Setzer et al. 2019. The chosen detection criteria are related +# to those used in the LSST DESC white paper detectability work and the two # references above. # # Contact for this code: @@ -26,24 +26,24 @@ class GW170817DetMetric(TransientAsciiSEDMetric): Parameters ----------- - ascii_file : str, optional + ascii_file : `str`, optional The ascii file containing the inputs for the SED. The file must contain three columns - ['phase', 'wave', 'flux'] - of phase/epoch (in days), wavelength (Angstroms), and flux (ergs/s/Angstrom). Default, data provided with sims_maf_contrib. - metric_name : str, optional + metric_name : `str`, optional Name of the metric, can be overwritten by user or child metric.
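In magnitudes, the decay law quoted above is m(t) = m_1min + 2.5 * alpha * log10(t / 1 min); a small sketch of that relation (not the metric's internal code; the defaults are the docstring values):

import numpy as np

def grb_afterglow_mag(t_days, mag_1min=15.35, alpha=1.0):
    # flux decline F(t) = F_1min * (t / 1 min)^-alpha, converted to mags
    t_minutes = t_days * 24.0 * 60.0
    return mag_1min + 2.5 * alpha * np.log10(t_minutes)

# with alpha = 1 the afterglow fades by ~4.4 mag between 1 minute and 1 hour
print(grb_afterglow_mag(1.0 / 24.0) - grb_afterglow_mag(1.0 / 1440.0))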
- z: float, optional + z : `float`, optional Cosmological redshift at which to consider observations of the transient SED. Default 0.08. - num_filters : int, optional + num_filters : `int`, optional Number of filters that need to be observed for an object to be counted as detected. Default 2. (if num_per_lightcurve is 0, then this will be reset to 0). - filter_time : float, optional + filter_time : `float`, optional The time within which observations in at least num_filters are required (in days). Default 25.0 days. - num_phases_to_run : int, optional + num_phases_to_run : `int`, optional Sets the number of phases that should be checked. One can imagine pathological cadences where many objects pass the detection criteria, but would not if the observations were offset diff --git a/rubin_sim/maf/maf_contrib/intervals_between_obs_metric.py b/rubin_sim/maf/maf_contrib/intervals_between_obs_metric.py index d04626e21..f608e364c 100644 --- a/rubin_sim/maf/maf_contrib/intervals_between_obs_metric.py +++ b/rubin_sim/maf/maf_contrib/intervals_between_obs_metric.py @@ -1,13 +1,22 @@ # Example for IntervalsBetweenObsMetric # Somayeh Khakpash - Lehigh University # Last edited : 10/21/2020 -# Calculates statistics (mean or median or standard deviation) of intervals between observations during simultaneous windows/Inter-seasonal gap of another survey. -# SurveyIntervals is the list of the survey observing window/Inter-seasonal gap intervals. It should be in the format: -# SurveyIntervals = [ [YYYY-MM-DD, YYYY-MM-DD] , [YYYY-MM-DD, YYYY-MM-DD] , ... , [YYYY-MM-DD, YYYY-MM-DD] ] +# Calculates statistics (mean or median or standard deviation) of intervals +# between observations during simultaneous windows/Inter-seasonal gap of +# another survey. +# SurveyIntervals is the list of the survey observing window/Inter-seasonal +# gap intervals. It should be in the format: +# SurveyIntervals = [ [YYYY-MM-DD, YYYY-MM-DD] , [YYYY-MM-DD, YYYY-MM-DD] , +# ... , [YYYY-MM-DD, YYYY-MM-DD] ] # We are interested in calculating this metric in each of the LSST passbands. -# The difference between this metric and the VisitGapMetric metric is that VisitGapMetric calculates reduceFunc of gaps between observations of a data_slice throughout the whole -# baseline, but IntervalsBetweenObsMetric calculates the gaps between observations during another survey observing window. This metric combined with surveys footprint -# overlap can determine how many often another survey footprint is observed by LSST during specific time intervals. +# The difference between this metric and the VisitGapMetric metric is that +# VisitGapMetric calculates reduceFunc of gaps between observations of a +# data_slice throughout the whole +# baseline, but IntervalsBetweenObsMetric calculates the gaps between +# observations during another survey observing window. +# This metric combined with survey footprint +# overlap can determine how often another survey footprint is +# observed by LSST during specific time intervals. __all__ = ("IntervalsBetweenObsMetric",) import numpy as np diff --git a/rubin_sim/maf/maf_contrib/kne_metrics.py b/rubin_sim/maf/maf_contrib/kne_metrics.py index cab12f56a..45452bb69 100644 --- a/rubin_sim/maf/maf_contrib/kne_metrics.py +++ b/rubin_sim/maf/maf_contrib/kne_metrics.py @@ -438,17 +438,19 @@ def generate_kn_pop_slicer( The seed passed to np.random n_files : `int`, optional The number of different kilonova lightcurves to use - This should match the length of the filenames list passed to the KNePopMetric directly.
+ This should match the length of the filenames list passed + to the KNePopMetric directly. d_min : `float` or `int`, optional Minimum luminosity distance (Mpc) d_max : `float` or `int`, optional Maximum luminosity distance (Mpc) ra, dec : `np.ndarray`, (N,) or None - The ra and dec to use for event positions. Generates uniformly on the spehere if None. (degrees) + The ra and dec to use for event positions. + Generates uniformly on the sphere if None. (degrees) """ def rndm(a, b, g, size=1): - """Power-law gen for pdf(x) \propto x^{g-1} for a<=x<=b""" + """Power-law gen for pdf(x) propto x^{g-1} for a<=x<=b""" r = np.random.random(size=size) ag, bg = a**g, b**g return (ag + (bg - ag) * r) ** (1.0 / g) diff --git a/rubin_sim/maf/maf_contrib/lss_metrics.py b/rubin_sim/maf/maf_contrib/lss_metrics.py index 4de790df5..8504b7c3c 100644 --- a/rubin_sim/maf/maf_contrib/lss_metrics.py +++ b/rubin_sim/maf/maf_contrib/lss_metrics.py @@ -4,17 +4,19 @@ import numpy as np import scipy -from rubin_sim.maf.metrics import BaseMetric, Coaddm5Metric +from rubin_sim.maf.metrics import BaseMetric, ExgalM5 class GalaxyCountsMetric(BaseMetric): - """Estimate the number of galaxies expected at a particular coadded depth.""" + """Estimate the number of galaxies expected at a particular coadded depth. + """ def __init__(self, m5_col="fiveSigmaDepth", nside=128, metric_name="GalaxyCounts", **kwargs): self.m5_col = m5_col super(GalaxyCountsMetric, self).__init__(col=self.m5_col, metric_name=metric_name, **kwargs) - # Use the coadded depth metric to calculate the coadded depth at each point. - self.coaddmetric = Coaddm5Metric(m5_col=self.m5_col) + # Use the extinction corrected coadded depth metric to calculate + # the depth at each point. + self.coaddmetric = ExgalM5(m5_col=self.m5_col) # Total of 41253.0 galaxies across the sky (at what magnitude?). # This didn't seem to work quite right for me.. self.scale = 41253.0 / hp.nside2npix(nside) / 5000.0 @@ -22,7 +24,8 @@ def __init__(self, m5_col="fiveSigmaDepth", nside=128, metric_name="GalaxyCounts self.units = "Galaxy Counts" def _gal_count(self, apparent_mag, coaddm5): - # Order for galCount must be apparent mag, then coaddm5, for scipy.integrate method. + # Order for galCount must be apparent mag, then coaddm5, + # for scipy.integrate method. dn_gal = np.power(10.0, -3.52) * np.power(10.0, 0.34 * apparent_mag) completeness = 0.5 * scipy.special.erfc(apparent_mag - coaddm5) return dn_gal * completeness @@ -32,8 +35,9 @@ def run(self, data_slice, slice_point=None): coaddm5 = self.coaddmetric.run(data_slice) # Calculate the number of galaxies.
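As a standalone check that rndm above is inverse-CDF sampling of a truncated power law: for pdf(x) propto x^(g-1) on [a, b] the inverted CDF at a uniform deviate is exactly the expression returned (the values below are arbitrary):

import numpy as np

rng = np.random.default_rng(42)
a, b, g = 10.0, 300.0, 3.0  # g=3 gives pdf propto x^2, i.e. uniform in volume
x = (a**g + (b**g - a**g) * rng.random(100_000)) ** (1.0 / g)
# compare to the analytic mean for pdf propto x^2 on [a, b]
print(x.mean(), 3 * (b**4 - a**4) / (4 * (b**3 - a**3)))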
# From Carroll et al, 2014 SPIE (http://arxiv.org/abs/1501.04733) - # I'm not entirely certain this gives a properly calibrated number of galaxy counts, - # however it is proportional to the expected number at least (and should be within an order of magnitude) + # I'm not entirely certain this gives a properly calibrated number + # of galaxy counts, however it is proportional to the expected number + # at least (and should be within an order of magnitude) num_gal, int_err = scipy.integrate.quad(self._gal_count, -np.inf, 32, args=coaddm5) num_gal *= self.scale return num_gal diff --git a/rubin_sim/maf/maf_contrib/num_obs_in_survey_time_overlap_metric.py b/rubin_sim/maf/maf_contrib/num_obs_in_survey_time_overlap_metric.py index 04744c0d9..beb9c9028 100644 --- a/rubin_sim/maf/maf_contrib/num_obs_in_survey_time_overlap_metric.py +++ b/rubin_sim/maf/maf_contrib/num_obs_in_survey_time_overlap_metric.py @@ -1,9 +1,12 @@ # Example for numObsInSurveyTimeOverlap # Somayeh Khakpash - Lehigh University # Last edited : 10/21/2020 -# Calculates number of observations during simultaneous windows of another survey. -# SurveyObsWin is the list of the survey observing window/inter-seasonal gap intervals. It should be in the format: -# SurveyObsWin = [ [YYYY-MM-DD, YYYY-MM-DD] , [YYYY-MM-DD, YYYY-MM-DD] , ... , [YYYY-MM-DD, YYYY-MM-DD] ] +# Calculates number of observations during simultaneous windows of another +# survey. +# SurveyObsWin is the list of the survey observing window/inter-seasonal +# gap intervals. It should be in the format: +# SurveyObsWin = [ [YYYY-MM-DD, YYYY-MM-DD] , +# [YYYY-MM-DD, YYYY-MM-DD] , ... , [YYYY-MM-DD, YYYY-MM-DD] ] __all__ = ("NumObsInSurveyTimeOverlapMetric",) diff --git a/rubin_sim/maf/maf_contrib/periodic_metric.py b/rubin_sim/maf/maf_contrib/periodic_metric.py index 26fcf8c5a..658bca69d 100644 --- a/rubin_sim/maf/maf_contrib/periodic_metric.py +++ b/rubin_sim/maf/maf_contrib/periodic_metric.py @@ -2,11 +2,13 @@ # Mike Lund - Vanderbilt University # mike.lund@gmail.com # Last edited 3/10/2015 -# Motivation: The detection of periodic signals can be examined by using canonical signals and attempted -# to recover these. However, a more general approach would be to examine the strength in signal that is -# lost as a result of poor phase coverage. -# This metric calculates the spectral window function for a set of scheduled observations. -# The largest peak at a nonzero frequency is used as a proxy to quantify how much power is +# Motivation: The detection of periodic signals can be examined by +# using canonical signals and attempted to recover these. +# However, a more general approach would be to examine the strength in +# signal that is lost as a result of poor phase coverage. +# This metric calculates the spectral window function for a set of +# scheduled observations. The largest peak at a nonzero frequency is +# used as a proxy to quantify how much power is # lost to other frequencies. Full phase coverage will result in a value of 1. # We refer to this as the Periodic Purity Function. @@ -18,8 +20,8 @@ class PeriodicMetric(BaseMetric): - """From a set of observation times, uses code provided by Robert Siverd (LCOGT) to calculate - the spectral window function. + """From a set of observation times, uses code provided by Robert Siverd + (LCOGT) to calculate the spectral window function. 
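The integral in run() can be evaluated standalone as a sketch (the coadded depth of 26.0 is an arbitrary example, and the metric additionally multiplies by self.scale afterwards):

import numpy as np
import scipy.integrate
import scipy.special

def gal_count(apparent_mag, coaddm5):
    # power-law differential counts with an erfc completeness roll-off
    dn_gal = np.power(10.0, -3.52) * np.power(10.0, 0.34 * apparent_mag)
    completeness = 0.5 * scipy.special.erfc(apparent_mag - coaddm5)
    return dn_gal * completeness

num_gal, int_err = scipy.integrate.quad(gal_count, -np.inf, 32, args=(26.0,))
print(num_gal)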
""" def __init__(self, time_col="expMJD", **kwargs): diff --git a/rubin_sim/maf/maf_contrib/periodic_star_metric.py b/rubin_sim/maf/maf_contrib/periodic_star_metric.py index ebb6a543b..2cc83cfed 100644 --- a/rubin_sim/maf/maf_contrib/periodic_star_metric.py +++ b/rubin_sim/maf/maf_contrib/periodic_star_metric.py @@ -37,9 +37,35 @@ def __call__(self, t, x0, x1, x2, x3, x4, x5, x6, x7, x8): class PeriodicStarMetric(BaseMetric): - """At each slice_point, run a Monte Carlo simulation to see how well a periodic source can be fit. - Assumes a simple sin-wave light-curve, and generates Gaussain noise based in the 5-sigma limiting depth + """At each slice_point, run a Monte Carlo simulation to see how + well a periodic source can be fit. Assumes a simple sin-wave light-curve, + and generates Gaussain noise based in the 5-sigma limiting depth of each observation. + + Parameters + ---------- + period : `float` + The period to check, in days. + amplitude : `float` + The amplitude of the sinusoidal light curve, in mags. + phase : `float` + The phase of the lightcurve at the time of the first observation. + n_monte : `int` + The number of noise realizations to make in the Monte Carlo. + period_tol : `float` + The fractional tolerance on the period to require in order for a star + to be considered well-fit + amp_tol : `float` + The fractional tolerance on the amplitude. + means : `list` [`float`] + The mean magnitudes in ugrizy of the star. + mag_tol : `float` + The mean magnitude tolerance, in magnitudes, for the star to be + considered well-fit. + n_bands : `int` + Number of bands that must be within mag_tol. + seed : `int` + Random number seed for the noise realizations. """ def __init__( @@ -64,7 +90,8 @@ def __init__( period: days (default 10) amplitude: mags (default 1) n_monte: number of noise realizations to make in the Monte Carlo - period_tol: fractional tolerance on the period to demand for a star to be considered well-fit + period_tol: fractional tolerance on the period to demand for a star + to be considered well-fit amp_tol: fractional tolerance on the amplitude to demand means: mean magnitudes for ugrizy mag_tol: Mean magnitude tolerance (mags) @@ -94,8 +121,8 @@ def __init__( def run(self, data_slice, slice_point=None): # Bail if we don't have enough points - # (need to fit mean magnitudes in each of the available bands - self.means - # and for a period, amplitude, and phase) + # (need to fit mean magnitudes in each of the available bands - + # self.means and for a period, amplitude, and phase) if data_slice.size < self.means.size + 3: return self.badval @@ -128,11 +155,12 @@ def run(self, data_slice, slice_point=None): parm_vals, pcov = curve_fit( fit_obj, t["time"], true_lc + noise, p0=true_params, sigma=dmag ) - except: + except RuntimeError: parm_vals = true_params * 0 + np.inf fits[i, :] = parm_vals - # Throw out any magnitude fits if there are no observations in that filter + # Throw out any magnitude fits if there are no observations + # in that filter ufilters = np.unique(data_slice[self.filter_col]) if ufilters.size < 9: for key in list(self.filter2index.keys()): diff --git a/rubin_sim/maf/maf_contrib/star_count_mass_metric.py b/rubin_sim/maf/maf_contrib/star_count_mass_metric.py index 37cf281af..1073395b9 100644 --- a/rubin_sim/maf/maf_contrib/star_count_mass_metric.py +++ b/rubin_sim/maf/maf_contrib/star_count_mass_metric.py @@ -4,24 +4,57 @@ from rubin_sim.maf.metrics import BaseMetric -from .star_counts import * +from .star_counts import starcount_bymass # Example for 
CountMassMetric # Mike Lund - Vanderbilt University # mike.lund@gmail.com # Last edited 8/15/2015 -# Motivation: The distances to stars in LSST will be signficant enough that the structure of the Galaxy will be readily apparent because of its influence on the number of stars in a given field. Any metric concerned with the number of potential objects to be detected will need to feature not only the effects of the cadence but also the number of objects per field. -# This metric identifies the number of stars in a given field in a particular mass range that will be fainter than the saturation limit of 16th magnitude and still bright enough to have noise less than 0.03 mag. M1 and M2 are the low and high limits of the mass range in solar masses. 'band' is the band for the observations to be made in. +# Motivation: The distances to stars in LSST will be significant enough that +# the structure of the Galaxy will be readily apparent because of its +# influence on the number of stars in a given field. +# Any metric concerned with the number of potential objects to be detected +# will need to feature not only the effects of the cadence but also the +# number of objects per field. +# This metric identifies the number of stars in a given field in a particular +# mass range that will be fainter than the saturation limit of 16th magnitude +# and still bright enough to have noise less than 0.03 mag. +# M1 and M2 are the low and high limits of the mass range in solar masses. +# 'band' is the band for the observations to be made in. # Requires StarCounts.StarCounts +# NOTE +# There are stellar luminosity function maps available within MAF +# that may supersede these StarCount functions class StarCountMassMetric(BaseMetric): - """Find the number of stars in a given field in the mass range fainter than magnitude 16 and bright enough to have noise less than 0.03 in a given band. M1 and m2 are the upper and lower limits of the mass range. 'band' is the band to be observed.""" - - def __init__(self, **kwargs): - self.m1 = kwargs.pop("M1", 0.9) - self.m2 = kwargs.pop("M2", 1.0) - self.band = kwargs.pop("band", "i") + """Find the number of stars in a given field in the mass range + fainter than magnitude 16 and bright enough to have noise less than + 0.03 in a given band. + m1 and m2 are the lower and upper limits of the mass range. + 'band' is the band to be observed. + + This metric uses the stellar distance and luminosity equations + contributed by Mike Lund, which are based on the Galfast model. + There are some imposed limitations on the expected magnitudes + of the stars included for the metric, based on assuming saturation + at 16th magnitude and not considering stars with magnitude + uncertainties greater than 0.03 (based on photometry/m5 alone). + + Parameters + ---------- + m1 : `float` + Lower limit of the mass range. + m2 : `float` + Upper limit of the mass range. + band : `str` + Bandpass to consider.
+ """ + + def __init__(self, m1=0.9, m2=1.0, band='i', **kwargs): + self.m1 = m1 + self.m2 = m2 + self.band = band super(StarCountMassMetric, self).__init__(col=[], **kwargs) def run(self, data_slice, slice_point=None): diff --git a/rubin_sim/maf/maf_contrib/star_count_metric.py b/rubin_sim/maf/maf_contrib/star_count_metric.py index e1504a963..d7f67cece 100644 --- a/rubin_sim/maf/maf_contrib/star_count_metric.py +++ b/rubin_sim/maf/maf_contrib/star_count_metric.py @@ -4,26 +4,52 @@ from rubin_sim.maf.metrics import BaseMetric -from .star_counts import * +from .star_counts import starcount # Example for CountMetric # Mike Lund - Vanderbilt University # mike.lund@gmail.com # Last edited 8/15/2015 -# Motivation: The distances to stars in LSST will be signficant enough that the structure of the Galaxy will be readily apparent because of its influence on the number of stars in a given field. Any metric concerned with the number of potential objects to be detected will need to feature not only the effects of the cadence but also the number of objects per field. -# This metric identifies the number of stars in a given field in a particular distance range. D1 and D2 are the close and far distances in parsecs. +# Motivation: The distances to stars in LSST will be significant enough +# that the structure of the Galaxy will be readily apparent because of +# its influence on the number of stars in a given field. +# Any metric concerned with the number of potential objects to be +# detected will need to feature not only the effects of the cadence +# but also the number of objects per field. +# This metric identifies the number of stars in a given field in a +# particular distance range. +# D1 and D2 are the close and far distances in parsecs. # Requires StarCounts.StarCounts +# NOTE +# There are stellar luminosity function maps available within MAF +# that may supersede these StarCount functions class StarCountMetric(BaseMetric): - """Find the number of stars in a given field between d1 and d2 in parsecs.""" - - def __init__(self, **kwargs): - self.d1 = kwargs.pop("D1", 100) - self.d2 = kwargs.pop("D2", 1000) + """Find the number of stars in a given field between d1 and d2 in parsecs. + + This metric uses the stellar distance and luminosity equations + contributed by Mike Lund, which are based on the Galfast model. + There are some imposed limitations on the expected magnitudes + of the stars included for the metric, based on assuming saturation + at 16th magnitude and not considering stars with magnitude + uncertainties greater than 0.03 (based on photometry/m5 alone). + + + Parameters + ---------- + d1 : `float` + d1 in parsecs + d2 : `float` + d2 in parsecs + """ + + def __init__(self, d1=100, d2=1000, **kwargs): + self.d1 = d1 + self.d2 = d2 super(StarCountMetric, self).__init__(col=[], **kwargs) def run(self, data_slice, slice_point=None): self.dec_col = np.degrees(data_slice[0][3]) self.ra_col = np.degrees(data_slice[0][2]) - return starcount.starcount(self.ra_col, self.dec_col, self.d1, self.d2) + return starcount(self.ra_col, self.dec_col, self.d1, self.d2) diff --git a/rubin_sim/maf/maf_contrib/star_counts/abs_mag.py b/rubin_sim/maf/maf_contrib/star_counts/abs_mag.py index a57f2f2aa..c816ca7f4 100644 --- a/rubin_sim/maf/maf_contrib/star_counts/abs_mag.py +++ b/rubin_sim/maf/maf_contrib/star_counts/abs_mag.py @@ -3,7 +3,8 @@ # Mike Lund - Vanderbilt University # mike.lund@gmail.com # Last edited 8/15/2015 -# Description: Calculates absolute magnitudes as a function of spectral type. 
For use with Field Star Count metric +# Description: Calculates absolute magnitudes as a function of spectral type. +# For use with Field Star Count metric import sys import numpy as np diff --git a/rubin_sim/maf/maf_contrib/star_counts/coords.py b/rubin_sim/maf/maf_contrib/star_counts/coords.py index 8bfbc87a5..9fa114dd0 100644 --- a/rubin_sim/maf/maf_contrib/star_counts/coords.py +++ b/rubin_sim/maf/maf_contrib/star_counts/coords.py @@ -3,8 +3,12 @@ # Mike Lund - Vanderbilt University # mike.lund@gmail.com # Last edited 8/15/2015 -# Description: Provides the coordinate conversions between equatorial and galactic coordinates, as well as to galactic cylindrical coordinates. Two different functions are present that do the conversion, and a third that uses ephem package, for redundancy purposes. For use with Field Star Count metric -import math +# Description: Provides the coordinate conversions between equatorial and +# galactic coordinates, as well as to galactic cylindrical coordinates. +# Two different functions are present that do the conversion, and a third +# that uses ephem package, for redundancy purposes. +# For use with Field Star Count metric + import sys import numpy as np diff --git a/rubin_sim/maf/maf_contrib/star_counts/spec_type.py b/rubin_sim/maf/maf_contrib/star_counts/spec_type.py index e8f5c761b..22d5859a5 100644 --- a/rubin_sim/maf/maf_contrib/star_counts/spec_type.py +++ b/rubin_sim/maf/maf_contrib/star_counts/spec_type.py @@ -3,7 +3,8 @@ # Mike Lund - Vanderbilt University # mike.lund@gmail.com # Last edited 8/15/2015 -# Description: Calculates spectral types for stars on the main sequence as a function of stellar mass. For use with Field Star Count metric +# Description: Calculates spectral types for stars on the main sequence as +# a function of stellar mass. For use with Field Star Count metric import sys import numpy as np diff --git a/rubin_sim/maf/maf_contrib/star_counts/starcount.py b/rubin_sim/maf/maf_contrib/star_counts/starcount.py index 5220cbf0d..72a5a394c 100644 --- a/rubin_sim/maf/maf_contrib/star_counts/starcount.py +++ b/rubin_sim/maf/maf_contrib/star_counts/starcount.py @@ -3,7 +3,8 @@ # Mike Lund - Vanderbilt University # mike.lund@gmail.com # Last edited 8/15/2015 -# Description: Calculates the number of stars in a given direction and between a given set of distances. For use with Field Star Count metric +# Description: Calculates the number of stars in a given direction and +# between a given set of distances. For use with Field Star Count metric import math import sys diff --git a/rubin_sim/maf/maf_contrib/star_counts/starcount_bymass.py b/rubin_sim/maf/maf_contrib/star_counts/starcount_bymass.py index aa8d5dada..598d30cb9 100644 --- a/rubin_sim/maf/maf_contrib/star_counts/starcount_bymass.py +++ b/rubin_sim/maf/maf_contrib/star_counts/starcount_bymass.py @@ -3,8 +3,11 @@ # Mike Lund - Vanderbilt University # mike.lund@gmail.com # Last edited 8/15/2015 -# Description: Takes a given set of galactic coordinates and a stellar mass range, then calculates the number of stars within that range that will be fainter than mag 16, and have sufficiently low noise in the given band. For use with Field Star Count metric -import math +# Description: Takes a given set of galactic coordinates and a stellar +# mass range, then calculates the number of stars within that range +# that will be fainter than mag 16, and have sufficiently low noise +# in the given band. 
For use with Field Star Count metric + import sys import numpy as np diff --git a/rubin_sim/maf/maf_contrib/star_counts/stellardensity.py b/rubin_sim/maf/maf_contrib/star_counts/stellardensity.py index be8eb74ae..d76bce89a 100644 --- a/rubin_sim/maf/maf_contrib/star_counts/stellardensity.py +++ b/rubin_sim/maf/maf_contrib/star_counts/stellardensity.py @@ -3,7 +3,8 @@ # Mike Lund - Vanderbilt University # mike.lund@gmail.com # Last edited 8/15/2015 -# Description: Calculates the stellar density based off of Juric et al 2008 and Jackson et al 2002. For use with Field Star Count metric +# Description: Calculates the stellar density based on +# Juric et al 2008 and Jackson et al 2002. For use with Field Star Count metric import math import sys diff --git a/rubin_sim/maf/maf_contrib/tdes_pop_metric.py b/rubin_sim/maf/maf_contrib/tdes_pop_metric.py index 353a3af94..f0d177924 100644 --- a/rubin_sim/maf/maf_contrib/tdes_pop_metric.py +++ b/rubin_sim/maf/maf_contrib/tdes_pop_metric.py @@ -18,8 +18,9 @@ class TdeLc: Parameters ---------- - file_list : list of str (None) - List of file paths to load. If None, loads up all the files from data/tde/ + file_list : `list` [`str`], opt + List of file paths to load. + If None, loads up all the files from $RUBIN_SIM_DATA/maf/tde/ """ def __init__(self, file_list=None): @@ -97,7 +98,8 @@ def _pre_peak_detect(self, data_slice, slice_point, mags, t): """ Simple detection criteria """ result = 0 - # Simple alert criteria. Could make more in depth, or use reduce functions + # Simple alert criteria. + # Could make more in depth, or use reduce functions # to have multiple criteria checked. pre_peak_detected = np.where((t < 0) & (mags < data_slice[self.m5_col]))[0] @@ -176,6 +178,31 @@ def reduce_some_color_pu(self, metric): class TdePopMetricQuality(metrics.BaseMetric): + """Evaluate the likelihood of detecting a specific TDE. + Works with the TDEPopSlicer, which adds TDE events to the slice_points. + + Returns a quality score from 0 (detection criteria not met) up to 1 + (criteria met, with a data point on average every other night) for + TDEs with various detection criteria. + 'some_color' requires 1 detection pre-peak, 3 detections in different + filters within 10 days of the peak, and 2 detections in different bands + within tmax post-peak. + 'some_color_pu' has similar requirements, but constrains one + of the near-peak detections to be in u band and one of the + post-peak detections to be in u band. + + Parameters + ---------- + tmin : `float`, opt + Minimum time for first detection (days) + tmax : `float`, opt + Maximum time in the lightcurve for detection (days). + file_list : `list` [`str`], opt + The names of the TDE lightcurve data files. + mjd0 : `float`, opt + The start of the survey.
+ """ + def __init__( self, metric_name="TDEsPopMetricQuality", @@ -212,7 +239,7 @@ def __init__( **kwargs, ) - def _some_color_pnum_detect(self, data_slice, slice_point, mags, t): + def _some_color_pnum_detect(self, data_slice, mags, t): # 1 detection pre peak pre_peak_detected = np.where((t < -10) & (mags < data_slice[self.m5_col]))[0] if np.size(pre_peak_detected) < 1: @@ -231,17 +258,20 @@ def _some_color_pnum_detect(self, data_slice, slice_point, mags, t): # count number of data points in the light curve obs_points = np.where((t > self.tmin) & (t < self.tmax) & (mags < data_slice[self.m5_col]))[0] - # define the time range around peak in which the number of data points is measured + # define the time range around peak in which the number of + # data points is measured t_range = self.tmax - self.tmin - # number of data points / time range gives a "score" for light curve quality + # number of data points / time range gives a "score" for + # light curve quality # 0: did not pass some_color requirements; - # 1: passed some_color requirements and has 1 data point every other night + # 1: passed some_color requirements and has 1 data point + # every other night nresult = np.size(obs_points) / t_range return nresult - def _some_color_pu_pnum_detect(self, data_slice, slice_point, mags, t): + def _some_color_pu_pnum_detect(self, data_slice, mags, t): # 1 detection pre peak pre_peak_detected = np.where((t < -10) & (mags < data_slice[self.m5_col]))[0] if np.size(pre_peak_detected) < 1: @@ -266,17 +296,20 @@ def _some_color_pu_pnum_detect(self, data_slice, slice_point, mags, t): # count number of data points in the light curve obs_points = np.where((t > self.tmin) & (t < self.tmax) & (mags < data_slice[self.m5_col]))[0] - # define the time range around peak in which the number of data points is measured + # define the time range around peak in which the number of + # data points is measured t_range = self.tmax - self.tmin - # number of data points / time range gives a "score" for light curve quality + # number of data points / time range gives a "score" for + # light curve quality # 0: did not pass some_color_pu requirements; - # 1: passed some_color_pu requirements and has 1 data point every other night + # 1: passed some_color_pu requirements and has 1 data point + # every other night nresult = np.size(obs_points) / t_range return nresult - def run(self, data_slice, slice_point=None): + def run(self, data_slice, slice_point): result = {} t = data_slice[self.mjd_col] - self.mjd0 - slice_point["peak_time"] mags = np.zeros(t.size, dtype=float) @@ -287,8 +320,8 @@ def run(self, data_slice, slice_point=None): # Apply dust extinction on the light curve mags[infilt] += self.ax1[filtername] * slice_point["ebv"] - result["some_color_pnum"] = self._some_color_pnum_detect(data_slice, slice_point, mags, t) - result["some_color_pu_pnum"] = self._some_color_pu_pnum_detect(data_slice, slice_point, mags, t) + result["some_color_pnum"] = self._some_color_pnum_detect(data_slice, mags, t) + result["some_color_pu_pnum"] = self._some_color_pu_pnum_detect(data_slice, mags, t) return result @@ -300,19 +333,20 @@ def reduce_some_color_pu_pnum(self, metric): def generate_tde_pop_slicer(t_start=1, t_end=3652, n_events=10000, seed=42, n_files=7): - """Generate a population of TDE events, and put the info about them into a UserPointSlicer object + """Generate a population of TDE events, + and put the info about them into a UserPointSlicer object. 
Parameters ---------- - t_start : float (1) + t_start : `float`, opt The night to start tde events on (days) - t_end : float (3652) + t_end : `float`, opt The final night of TDE events - n_events : int (10000) + n_events : `int`, opt The number of TDE events to generate - seed : float + seed : `float`, opt The seed passed to np.random - n_files : int (7) + n_files : `int`, opt The number of different TDE lightcurves to use """ diff --git a/rubin_sim/maf/maf_contrib/transient_ascii_sed_metric.py b/rubin_sim/maf/maf_contrib/transient_ascii_sed_metric.py index 814dd02b6..296d0c76d 100644 --- a/rubin_sim/maf/maf_contrib/transient_ascii_sed_metric.py +++ b/rubin_sim/maf/maf_contrib/transient_ascii_sed_metric.py @@ -19,7 +19,7 @@ from sncosmo import Model, TimeSeriesSource, read_griddata_ascii except ImportError: pass -from astropy.cosmology import Planck15 as cosmo +from astropy.cosmology import Planck15 as cosmo # noqa: N813 from rubin_sim.maf.metrics import BaseMetric from rubin_sim.maf.utils import m52snr @@ -35,43 +35,43 @@ class TransientAsciiSEDMetric(BaseMetric): Parameters ----------- - ascii_file : str + ascii_file : `str` The ascii file containing the inputs for the SED. The file must contain three columns - ['phase', 'wave', 'flux'] - of phase/epoch (in days), wavelength (Angstroms), and flux (ergs/s/Angstrom). - metric_name : str, optional + metric_name : `str`, optional Name of the metric, can be overwritten by user or child metric. - survey_duration : float, optional + survey_duration : `float`, optional Length of survey (years). Default 10 or maximum of timespan of observations. - survey_start : float, optional + survey_start : `float`, optional MJD for the survey start date. Default None (uses the time of the first observation at each pointing). - detect_snr : dict, optional + detect_snr : `dict`, optional An observation will be counted toward the discovery criteria if the light curve SNR is higher than detect_snr (specified per bandpass). Values must be provided for each filter which should be considered in the lightcurve. Default is {'u': 5, 'g': 5, 'r': 5, 'i': 5, 'z': 5, 'y': 5} - z: float, optional + z : `float`, optional Cosmological redshift at which to consider observations of the transient SED. - num_pre_time : int, optional + num_pre_time : `int`, optional Number of observations (in any filter(s)) to demand before pre_time, before saying a transient has been detected. Default 0. - pre_time : float, optional + pre_time : `float`, optional The time by which num_pre_time detections are required (in days). Default 5.0. - num_filters : int, optional + num_filters : `int`, optional Number of filters that need to be observed for an object to be counted as detected. Default 1. (if num_per_lightcurve is 0, then this will be reset to 0). - filter_time : float, optional + filter_time : `float`, optional The time within which observations in at least num_filters are required (in days). Default None (no time constraint). - num_per_lightcurve : int, optional + num_per_lightcurve : `int`, optional Number of sections of the light curve that must be sampled above the detect_snr threshold for the light curve to be counted. For example, num_per_lightcurve = 2 means a light curve is only + half of the LC, and at least one in the second half of the LC. num_per_lightcurve = 4 means each quarter of the light curve must be detected to count. Default 1.
- num_phases_to_run : int, optional + num_phases_to_run : `int`, optional Sets the number of phases that should be checked. One can imagine pathological cadences where many objects pass the detection criteria, but would not if the observations were offset by a phase-shift. Default 1. - output_data : bool, optional + output_data : `bool`, optional If True, metric returns full lightcurve at each point. Note that this will potentially create a very large metric output data file. If False, metric returns the number of transients detected. @@ -144,7 +144,8 @@ def __init__( if self.num_per_lightcurve == 0: self.num_filters = 0 self.num_phases_to_run = num_phases_to_run - # Read ascii lightcurve template here. It doesn't change per slice_point. + # Read ascii lightcurve template here. + # It doesn't change per slice_point. self.read_sed(ascii_file) def read_sed(self, ascii_file): @@ -157,7 +158,7 @@ def read_sed(self, ascii_file): Parameters ----------- - ascii_file: str + ascii_file : `str` string containing the path to the ascii file containing the SED evolution. @@ -177,9 +178,10 @@ def read_sed(self, ascii_file): def make_model(self): """ Wrapper function to take the phase, wave, and flux information from the - provided ascii file and create an sncosmo Model object, and consistently - redshift that model given initialization Parameters. This sets the - transient model in rest frame, and transient model in observer frame, + provided ascii file and create an sncosmo Model object, + and consistently redshift that model given initialization Parameters. + This sets the transient model in rest frame, and transient model in + observer frame, i.e., it is cosmologically redshifted. """ # Set the source model with sncosmo API. @@ -188,7 +190,7 @@ def make_model(self): # Use deepcopy to ensure the full class is saved as attribute of new # class. self.transient_model = deepcopy(Model(source=source)) - # With the Model set, apply the cosmological redshift specfied at + # With the Model set, apply the cosmological redshift specified at # initialization. self.set_redshift() @@ -223,14 +225,14 @@ def make_lightcurve(self, time, filters): Parameters ---------- - time : `numpy.ndarray` + time : `np.ndarray`, (N,) The times of the observations. filters : `list` [`str`] The filters of the observations. ['u','g','r',...] format. Returns ------- - light_curve_mags : `numpy.ndarray` + light_curve_mags : `np.ndarray`, (N,) The magnitudes of the object at the times and in the filters of the observations. """ @@ -250,8 +252,8 @@ def make_lightcurve(self, time, filters): # to sncosmo documentation. for obs_time in flt_times: filter_mag.append(redshifted_model.bandmag("lsst" + flt, "ab", obs_time)) - # Set light_curve_mags for array indices corresponding to observations of the - # current filter. + # Set light_curve_mags for array indices corresponding to + # observations of the current filter. light_curve_mags[np.where(filters == flt)[0]] = np.array(filter_mag) self.light_curve_mags = light_curve_mags @@ -262,16 +264,16 @@ def evaluate_all_detection_criteria(self, data_slice): Parameters ----------- - data_slice : numpy.array + data_slice : `np.array` Numpy structured array containing the data related to the visits provided by the slicer. Returns -------- - transient_detected: np.array, `bool` + transient_detected : `np.array`, (`bool`,) Array containing `bool` tracking variable whether transient is detected by passing all criteria.
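A minimal sketch of the sncosmo pattern described above, with a toy flat SED grid standing in for the ascii file (TimeSeriesSource, Model, and the built-in 'lsstr' bandpass are sncosmo's own API; the cosmology-dependent amplitude scaling performed by set_redshift() is omitted here):

import numpy as np
from sncosmo import Model, TimeSeriesSource

phase = np.linspace(0.0, 50.0, 26)  # days
wave = np.linspace(3000.0, 11000.0, 100)  # Angstroms
flux = np.full((phase.size, wave.size), 1e-17)  # ergs/s/cm^2/Angstrom
source = TimeSeriesSource(phase, wave, flux)
model = Model(source=source)
model.set(z=0.08)  # redshift the model
print(model.bandmag("lsstr", "ab", 10.0))  # magnitude 10 days after t0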
- num_detected: int + num_detected : `int` Scalar value of the number of transients that were detected in total between all phase shifts considered. @@ -294,23 +296,27 @@ def evaluate_all_detection_criteria(self, data_slice): ] self.evaluate_pre_time_detection_criteria(t_id) - # Check if previous condition passed. If not, move to next transient. + # Check if previous condition passed. + # If not, move to next transient. if not self.transient_detected[t_id]: continue self.evaluate_phase_section_detection_criteria(t_id) - # Check if previous condition passed. If not, move to next transient. + # Check if previous condition passed. + # If not, move to next transient. if not self.transient_detected[t_id]: continue self.evaluate_number_filters_detection_criteria(data_slice, start_ind, end_ind, t_id) - # Check if previous condition passed. If not, move to next transient. + # Check if previous condition passed. + # If not, move to next transient. if not self.transient_detected[t_id]: continue self.evaluate_filter_in_time_detection_criteria(t_id) - # Check if previous condition passed. If not, move to next transient. - # Note: this last if block is techinically unnecessary but if + # Check if previous condition passed. + # If not, move to next transient. + # Note: this last if block is technically unnecessary but if # further criteria are added then the if block should be copied # afterwards. if not self.transient_detected[t_id]: @@ -329,8 +335,8 @@ def evaluate_pre_time_detection_criteria(self, t_id): Parameters ----------- - t_id: int - The transient id of the currently evaluted transient. + t_id : `int` + The transient id of the currently evaluated transient. """ # If we did not get enough detections before pre_time, set # transient_detected to False. @@ -345,8 +351,8 @@ def evaluate_phase_section_detection_criteria(self, t_id): Parameters ----------- - t_id: int - The transient id of the currently evaluted transient. + t_id : `int` + The transient id of the currently evaluated transient. """ # If we did not get detections over enough sections of the # lightcurve, set transient_detected to False. @@ -358,22 +364,22 @@ def evaluate_phase_section_detection_criteria(self, t_id): def evaluate_number_filters_detection_criteria(self, data_slice, start_ind, end_ind, t_id): """ - Function to evaluate if the current transient passes the required number - of detections in different filters. + Function to evaluate if the current transient passes the required + number of detections in different filters. Parameters ----------- - data_slice : numpy.array + data_slice : `np.array`, (N,) Numpy structured array containing the data related to the visits provided by the slicer. - start_ind: int + start_ind : `int` Starting index for observations of the specific transient being evaluated. - end_ind: int + end_ind : `int` Ending index for observations of the specific transient being evaluated. - t_id: int - The transient id of the currently evaluted transient. + t_id : `int` + The transient id of the currently evaluated transient. """ # If we did not get detections in enough filters, set transient # detected to False. @@ -391,7 +397,7 @@ def evaluate_filter_in_time_detection_criteria(self, t_id): Parameters ----------- - t_id: int + t_id : `int` The transient id of the currently evaluated transient.
""" # If we did not get detections in enough filters within required @@ -417,36 +423,36 @@ def evaluate_filter_in_time_detection_criteria(self, t_id): def setup_phase_shift_dependent_variables(self, time_shift, data_slice): """ - Wrapper function to initilaize variables that will change for each + Wrapper function to initialize variables that will change for each phase shift that is considered. Parameters ----------- - time_shift: float + time_shift : `float` The offset given the currently considered phase shift by which to cyclically shift the SED evolution. - data_slice : numpy.array + data_slice : `np.array`, (N,) Numpy structured array containing the data related to the visits provided by the slicer. Returns ---------- - max_num_transients: int + max_num_transients : `int` Updated number of the total simulated transients. - observation_epoch: np.array + observation_epoch : `np.array`, (N,) Array of transient light curve phases of observations of transients within this phase shift cycle. - transient_id: np.array, int + transient_id : `np.array`, (N,) Array of all the transient ids within this phase shift cycle, - regardless of whether it is observed. - transient_id_start: int + regardless of whether it is observed. dtype int. + transient_id_start : `int` Updated starting id for next phase shift loop. - transient_start_index: np.array, int + transient_start_index : `np.array`, (N,) Array of the indicies for each transient that are the start of - their observations in the observation array. - transient_end_index: np.array, int + their observations in the observation array. dtype int. + transient_end_index: `np.array`, (N,) Array of the indicies for each transient that are the end of - their observations in the observation array. + their observations in the observation array. dtype int. """ # Update the maximum possible transients that could have been # observed during survey_duration. @@ -471,23 +477,23 @@ def setup_phase_shift_dependent_variables(self, time_shift, data_slice): def setup_run_metric_variables(self, data_slice): """ - Wrapper function to handle basic initialization of variables used to run - this metric. + Wrapper function to handle basic initialization of variables used + to run this metric. Parameters ----------- - data_slice : numpy.array + data_slice : `np.array`, (N,) Numpy structured array containing the data related to the visits provided by the slicer. Returns --------- - data_slice : numpy.array + data_slice : `np.array`, (N,) Now sorted in time. - survey_duration: float + survey_duration : `float` Defaults to the maximum between the chosen slicer and the user specified duration given to the metric. - survey_start: float + survey_start : `float` Defaults to user specified, or metric default, however if it is not defined sets to the earliest time in the given slicer. """ @@ -514,21 +520,21 @@ def initialize_phase_loop_variables(self, data_slice): Parameters ----------- - data_slice : numpy.array + data_slice : `np.array`, (N,) Numpy structured array containing the data related to the visits provided by the slicer. Returns --------- - time_phase_shifts: np.array + time_phase_shifts : `np.array`, (N,) The phase offsets over which to iterate detections given the specfied number of phases to run. - num_detected: int + num_detected : `int` Initialized variable for the number detected, set to zero. - max_num_transients: int + max_num_transients : `int` Initialized variable for the total transients that are simulated counting the multiplicity due to phase shifts. 
- transient_id_start: int + transient_id_start : `int` The starting id for simulated transients that are observed. This accounts for if the requested length of the data_slice and the number of simulated transient observations mismatch the number @@ -541,7 +547,7 @@ def initialize_phase_loop_variables(self, data_slice): self.time_phase_shifts = ( np.arange(self.num_phases_to_run) * self.transient_duration / float(self.num_phases_to_run) ) - # Total number of transient which have reached detection threshholds. + # Total number of transients which have reached detection thresholds. self.num_detected = 0 # Total number of transients which could possibly be detected, # given survey duration and transient duration. @@ -554,26 +560,26 @@ def initialize_phase_loop_variables(self, data_slice): def evaluate_snr_thresholds(self, data_slice): """ - Take the given data_slice and the set SNR thresholds for observations to - be considered in further detections and compute which observations + Take the given data_slice and the set SNR thresholds for observations + to be considered in further detections and compute which observations pass. Parameters ----------- - data_slice : numpy.array + data_slice : `np.array`, (N,) Numpy structured array containing the data related to the visits provided by the slicer. Returns -------- - obs_above_SNR_threshold: np.array, `bool` + obs_above_SNR_threshold : `np.array`, (N,) `bool` array corresponding to all observations and whether or not, given their filter specified SNR threshold, they pass this thresholding cut. """ # Initialize array for observations below or above SNR threshold self.obs_above_snr_threshold = np.zeros(len(self.light_curve_SNRs), dtype=bool) - # Identify which detections rise above the required SNR threshhold + # Identify which detections rise above the required SNR threshold # in each filter. for filt in np.unique(data_slice[self.filter_col]): # Find the indices for observations in current filter. @@ -595,16 +601,16 @@ def run(self, data_slice, slice_point=None): Parameters ---------- - data_slice : numpy.array + data_slice : `np.array`, (N,) Numpy structured array containing the data related to the visits provided by the slicer. - slice_point : dict, optional + slice_point : `dict`, optional Dictionary containing information about the slice_point currently active in the slicer. Returns ------- - float or dict + result : `float` or `dict` The fraction of transients that could be detected. (if output_data is False) Otherwise, a dictionary with arrays of 'transient_id', 'lcMag', 'detected', 'expMJD', @@ -625,11 +631,12 @@ def run(self, data_slice, slice_point=None): # Check observations above the defined threshold for detection. self.evaluate_snr_thresholds(data_slice) - # With useable observations computed, evaluate all detection criteria + # With useable observations computed, + # evaluate all detection criteria self.evaluate_all_detection_criteria(data_slice) if self.output_data: - # Output all the light curves, regardless of detection threshhold, + # Output all the light curves, regardless of detection threshold, # but indicate which were 'detected'. # Only returns for one phase shift, not all.
return {
diff --git a/rubin_sim/maf/maf_contrib/triplet_metric.py b/rubin_sim/maf/maf_contrib/triplet_metric.py
index 5707122ee..e8ac63931 100644
--- a/rubin_sim/maf/maf_contrib/triplet_metric.py
+++ b/rubin_sim/maf/maf_contrib/triplet_metric.py
@@ -2,8 +2,16 @@
 # Mike Lund - Vanderbilt University
 # mike.lund@gmail.com
 # Last edited 9/6/2014
-# Motivation: The detection of nonperiodic transient events can be thought of as most simply being accomplished by a set of three observations, one before the event occurs, a second after the event has begun, and a third to confirm the event is real.
-# This metric identifies the number of triplets that will occur. DelMin and DelMax set the smallest and largest intervals that can occur between the first and second point and between the second and third point. This can be set to reflect the timescales for various events. RatioMax and RatioMin set constraints on how similar the two intervals must be. RatioMin can never be less than 1.
+# Motivation: The detection of nonperiodic transient events can be thought of
+# as most simply being accomplished by a set of three observations,
+# one before the event occurs, a second after the event has begun,
+# and a third to confirm the event is real.
+# This metric identifies the number of triplets that will occur.
+# DelMin and DelMax set the smallest and largest intervals that can occur
+# between the first and second point and between the second and third point.
+# This can be set to reflect the timescales for various events.
+# RatioMax and RatioMin set constraints on how similar the two intervals
+# must be. RatioMin can never be less than 1.
 __all__ = ("TripletMetric", "TripletBandMetric")
@@ -13,9 +21,11 @@
 class TripletMetric(BaseMetric):
-    """Find the number of 'triplets' of three images taken in any band, based on user-selected minimum and maximum intervals (in hours),
+    """Find the number of 'triplets' of three images taken in any band,
+    based on user-selected minimum and maximum intervals (in hours),
 as well as constraining the ratio of the two exposure intervals.
-    Triplets are not required to be consecutive observations and may be overlapping.
+    Triplets are not required to be consecutive observations and
+    may be overlapping.
 """
 def __init__(self, time_col="expMJD", **kwargs):
@@ -40,25 +50,28 @@ def run(self, data_slice, slice_point=None):
 # iterate over every middle exposure
 for middleindex in index2:
 timeb = times[middleindex]
-            # calculate the window to look for all possible third points in
+            # calculate the window to look for all possible third points
 minmax2 = [timeb + delmin, timeb + delmax]
 index3 = np.where((times > minmax2[0]) & (times < minmax2[1]))[0]
 newadd = np.size(index3)
-            total = total + newadd  # add all triplets with same first two observations to total
+            total = total + newadd  # add all triplets with same
+            # first two observations to total
 return total
 class TripletBandMetric(BaseMetric):
-    """Find the number of 'triplets' of three images taken in the same band, based on user-selected minimum and maximum intervals (in hours),
+    """Find the number of 'triplets' of three images taken in the same band,
+    based on user-selected minimum and maximum intervals (in hours),
 as well as constraining the ratio of the two exposure intervals.
-    Triplets are not required to be consecutive observations and may be overlapping.
+    Triplets are not required to be consecutive observations and may be
+    overlapping.
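The nested window search in run() above reduces to two interval cuts per starting point. A toy, self-contained version of the same count (times and windows made up for illustration):

import numpy as np

# Times in days; both gaps must fall inside (delmin, delmax) to count.
times = np.sort(np.array([0.0, 0.1, 0.25, 0.4, 1.0]))
delmin, delmax = 0.05, 0.5

total = 0
for t1 in times:
    # all valid second points after t1
    second = times[(times > t1 + delmin) & (times < t1 + delmax)]
    for t2 in second:
        # all valid third points after t2
        third = times[(times > t2 + delmin) & (times < t2 + delmax)]
        total += third.size  # every (t1, t2, t3) combination is one triplet
# total == 4 for these times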
""" def __init__(self, time_col="expMJD", filter_col="filter", **kwargs): self.time_col = time_col self.filter_col = filter_col - self.delmin = kwargs.pop("DelMin", 1) / 24.0 # convert minutes to hours - self.delmax = kwargs.pop("DelMax", 12) / 24.0 # convert minutes to hours + self.delmin = kwargs.pop("DelMin", 1) / 24.0 + self.delmax = kwargs.pop("DelMax", 12) / 24.0 self.ratiomax = kwargs.pop("RatioMax", 1000) self.ratiomin = kwargs.pop("RatioMin", 1) super(TripletBandMetric, self).__init__(col=[self.time_col, self.filter_col], **kwargs) @@ -97,26 +110,29 @@ def run(self, data_slice, slice_point=None): timeband = timedict[band] # iterate over every exposure time for counter, time in enumerate(timeband): - # calculate the window to look for all possible second points in + # calculate the window to look for all possible second points minmax = [time + delmin, time + delmax] index2 = np.where((minmax[0] < timeband) & (timeband < minmax[1]))[0] # iterate over every middle exposure for middleindex in index2: timeb = timeband[middleindex] - # calculate the window to look for all possible third points in + # calculate the window to look for all + # possible third points minmax2 = [timeb + delmin, timeb + delmax] index3 = np.where((timeband > minmax2[0]) & (timeband < minmax2[1]))[0] # iterate over last exposure of triplet for lastindex in index3: timec = timeband[lastindex] - # calculate intervals for T1 to T2 and T2 to T3, and take ratio + # calculate intervals for T1 to T2 and T2 to T3, + # and take ratio delt1 = timeb - time delt2 = timec - timeb ratio = np.max([delt1, delt2]) / np.min([delt1, delt2]) - # check if ratio is within restrictions (ratio should never be < 1 ) + # check if ratio is within restrictions + # (ratio should never be < 1 ) if ratiomin < ratio < ratiomax: bandcounter[band] = bandcounter[band] + 1 - return bandcounter # return bandcounter dictionary + return bandcounter def reduce_bandall(self, bandcounter): return np.sum(list(bandcounter.values())) From a3e96ca09929fcca241ac56d9f44bb048a831282 Mon Sep 17 00:00:00 2001 From: Lynne Jones Date: Thu, 1 Feb 2024 00:49:44 -0800 Subject: [PATCH 15/26] Remove unused PeriodDeviationMetric --- rubin_sim/maf/maf_contrib/var_metrics.py | 136 ----------------------- 1 file changed, 136 deletions(-) delete mode 100644 rubin_sim/maf/maf_contrib/var_metrics.py diff --git a/rubin_sim/maf/maf_contrib/var_metrics.py b/rubin_sim/maf/maf_contrib/var_metrics.py deleted file mode 100644 index c10d03e79..000000000 --- a/rubin_sim/maf/maf_contrib/var_metrics.py +++ /dev/null @@ -1,136 +0,0 @@ -__all__ = ("PeriodDeviationMetric",) - -import numpy as np -from scipy.signal import lombscargle - -from rubin_sim.maf.metrics import BaseMetric - -# Example of a *very* simple variabiilty metric -# krughoff@uw.edu, ebellm, ljones - - -def find_period_ls(times, mags, minperiod=2.0, maxperiod=35.0, nbinmax=10**5, verbose=False): - """Find the period of a lightcurve using scipy's lombscargle method. - The parameters used here imply magnitudes but there is no reason this would not work if fluxes are passed. - - :param times: A list of times for the given observations - :param mags: A list of magnitudes for the object at the given times - :param minperiod: Minimum period to search - :param maxperiod: Maximum period to search - :param nbinmax: Maximum number of frequency bins to use in the search - :returns: Period in the same units as used in times. 
This is simply - the max value in the Lomb-Scargle periodogram - """ - if minperiod < 0: - minperiod = 0.01 - nbins = int((times.max() - times.min()) / minperiod * 1000) - if nbins > nbinmax: - if verbose: - print("lowered nbins") - nbins = nbinmax - - # Recenter the magnitude measurements about zero - dmags = mags - np.median(mags) - # Create frequency bins - f = np.linspace(1.0 / maxperiod, 1.0 / minperiod, nbins) - - # Calculate periodogram - pgram = lombscargle(times, dmags, f) - - idx = np.argmax(pgram) - # Return period of the bin with the max value in the periodogram - return 1.0 / f[idx] - - -class PeriodDeviationMetric(BaseMetric): - """Measure the percentage deviation of recovered periods for pure sine wave variability (in magnitude).""" - - def __init__( - self, - col="observationStartMJD", - period_min=3.0, - period_max=35.0, - n_periods=5, - mean_mag=21.0, - amplitude=1.0, - metric_name="Period Deviation", - period_check=None, - **kwargs, - ): - """ - Construct an instance of a PeriodDeviationMetric class - - :param col: Name of the column to use for the observation times, commonly 'expMJD' - :param period_min: Minimum period to test (days) - :param period_max: Maximimum period to test (days) - :param period_check: Period to use in the reduce function (days) - :param mean_mag: Mean value of the lightcurve - :param amplitude: Amplitude of the variation (mags) - """ - self.period_min = period_min - self.period_max = period_max - self.period_check = period_check - self.guess_p_min = np.min([self.period_min * 0.8, self.period_min - 1]) - self.guess_p_max = np.max([self.period_max * 1.20, self.period_max + 1]) - self.n_periods = n_periods - self.mean_mag = mean_mag - self.amplitude = amplitude - super(PeriodDeviationMetric, self).__init__(col, metric_name=metric_name, **kwargs) - - def run(self, data_slice, slice_point=None): - """ - Run the PeriodDeviationMetric - :param data_slice : Data for this slice. - :param slice_point: Metadata for the slice. (optional) - :return: The error in the period estimated from a Lomb-Scargle periodogram - """ - - # Make sure the observation times are sorted - data = np.sort(data_slice[self.colname]) - - # Create 'nPeriods' random periods within range of min to max. - if self.period_check is not None: - periods = [self.period_check] - else: - periods = self.period_min + np.random.random(self.n_periods) * (self.period_max - self.period_min) - # Make sure the period we want to check is in there - periodsdev = np.zeros(np.size(periods), dtype="float") - for i, period in enumerate(periods): - omega = 1.0 / period - # Calculate up the amplitude. - lc = self.mean_mag + self.amplitude * np.sin(omega * data) - # Try to recover the period given a window buffered by min of a day or 20% of period value. - if len(lc) < 3: - # Too few points to find a period - return self.badval - - pguess = find_period_ls(data, lc, minperiod=self.guess_p_min, maxperiod=self.guess_p_max) - periodsdev[i] = (pguess - period) / period - - return {"periods": periods, "periodsdev": periodsdev} - - def reduce_p_dev(self, metric_val): - """ - At a particular slice_point, return the period deviation for self.period_check. - If self.period_check is None, just return a random period in the range. - """ - result = metric_val["periodsdev"][0] - return result - - def reduce_worst_period(self, metric_val): - """ - At each slice_point, return the period with the worst period deviation. 
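For reference, the core call in the helper removed above is scipy's lombscargle, which takes angular frequencies; recovering the period of a pure sine looks roughly like this (synthetic data, all values illustrative):

import numpy as np
from scipy.signal import lombscargle

rng = np.random.default_rng(42)
times = np.sort(rng.uniform(0.0, 100.0, 200))  # irregularly sampled days
true_period = 7.3
mags = 21.0 + 1.0 * np.sin(2.0 * np.pi * times / true_period)

# Angular frequencies covering periods between 2 and 35 days;
# median-subtract since lombscargle assumes a zero-centered signal.
omegas = np.linspace(2.0 * np.pi / 35.0, 2.0 * np.pi / 2.0, 10000)
pgram = lombscargle(times, mags - np.median(mags), omegas)
print(2.0 * np.pi / omegas[np.argmax(pgram)])  # recovers ~7.3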
- """ - worst_p = np.array(metric_val["periods"])[ - np.where(metric_val["periodsdev"] == metric_val["periodsdev"].max())[0] - ] - return worst_p - - def reduce_worst_p_dev(self, metric_val): - """ - At each slice_point, return the largest period deviation. - """ - worst_p_dev = np.array(metric_val["periodsdev"])[ - np.where(metric_val["periodsdev"] == metric_val["periodsdev"].max())[0] - ] - return worst_p_dev From 3c8330ed91a90ca0cdd0594f3d238b5cb060e181 Mon Sep 17 00:00:00 2001 From: Lynne Jones Date: Thu, 1 Feb 2024 02:03:00 -0800 Subject: [PATCH 16/26] Remove unused scripts from lss_obs_strategy --- .../maf_contrib/lss_obs_strategy/alm_plots.py | 281 ---- .../artificial_structure_calculation.py | 1203 ----------------- .../lss_obs_strategy/coadd_m5_analysis.py | 644 --------- .../constants_for_pipeline.py | 101 +- .../galaxy_counts_metric_extended.py | 76 +- .../galaxy_counts_with_pixel_calibration.py | 66 +- .../masking_algorithm_generalized.py | 244 ---- .../lss_obs_strategy/os_bias_analysis.py | 1049 -------------- .../maf_contrib/lss_obs_strategy/readme.md | 21 - 9 files changed, 104 insertions(+), 3581 deletions(-) delete mode 100644 rubin_sim/maf/maf_contrib/lss_obs_strategy/alm_plots.py delete mode 100644 rubin_sim/maf/maf_contrib/lss_obs_strategy/artificial_structure_calculation.py delete mode 100644 rubin_sim/maf/maf_contrib/lss_obs_strategy/coadd_m5_analysis.py delete mode 100644 rubin_sim/maf/maf_contrib/lss_obs_strategy/masking_algorithm_generalized.py delete mode 100644 rubin_sim/maf/maf_contrib/lss_obs_strategy/os_bias_analysis.py delete mode 100644 rubin_sim/maf/maf_contrib/lss_obs_strategy/readme.md diff --git a/rubin_sim/maf/maf_contrib/lss_obs_strategy/alm_plots.py b/rubin_sim/maf/maf_contrib/lss_obs_strategy/alm_plots.py deleted file mode 100644 index 4c28af7f7..000000000 --- a/rubin_sim/maf/maf_contrib/lss_obs_strategy/alm_plots.py +++ /dev/null @@ -1,281 +0,0 @@ -##################################################################################################### -# Purpose: plot skymaps/cartview plots corresponding to alms with specfied l-range (s). -# -# Humna Awan: humna.awan@rutgers.edu -##################################################################################################### -__all__ = ("alm_plots",) - -import os - -import healpy as hp -import matplotlib.pyplot as plt -import numpy as np - - -def alm_plots( - path, - out_dir, - bundle, - nside=128, - lmax=500, - filterband="i", - ra_range=[-50, 50], - dec_range=[-65, 5], - subsets_to_consider=[[130, 165], [240, 300]], - show_plots=True, -): - """ - Plot the skymaps/cartview plots corresponding to alms with specified l-ranges. - Automatically creates the output directories and saves the plots. - - Parameters - ---------- - path : `str` - path to the main directory where output directory is saved - out_dir : `str` - name of the main output directory - bundle : metricBundle - nside : `int` - HEALpix resolution parameter. Default: 128 - lmax : `int` - upper limit on the multipole. Default: 500 - filterband : `str` - any one of 'u', 'g', 'r', 'i', 'z', 'y'. Default: 'i' - ra_range : `np.ndarray` - range of right ascention (in degrees) to consider in cartview plot; only useful when - cartview= True. Default: [-50,50] - dec_range : `np.ndarray` - range of declination (in degrees) to consider in cartview plot; only useful when - cartview= True. Default: [-65,5] - subsets_to_consider : `np.ndarray` - l-ranges to consider, e.g. 
use [[50, 100]] to consider 50 subsets_to_consider[case][0]) & (l < subsets_to_consider[case][1]) - color[case] = color_array[case] - - # ------------------------------------------------------------------------ - # plot things out - plt.clf() - plt.plot(l, (cl * l * (l + 1)) / (2.0 * np.pi), color="b") - for key in list(lsubsets.keys()): - plt.plot( - l[lsubsets[key]], - (cl[lsubsets[key]] * l[lsubsets[key]] * (l[lsubsets[key]] + 1)) / (2.0 * np.pi), - color=color[key], - ) - plt.title(dither) - plt.xlabel("$\ell$") - plt.ylabel(r"$\ell(\ell+1)C_\ell/(2\pi)$") - filename = "cls_%s.png" % (dither) - plt.savefig( - "%s%s/%s/%s" % (path, out_dir, out_dir2, filename), - format="png", - bbox_inches="tight", - ) - - if show_plots: - plt.show() - else: - plt.close() - - survey_median = survey_median_dict[dither] - survey_std = survey_std_dict[dither] - - # ------------------------------------------------------------------------ - # plot full-sky-alm plots first - n_ticks = 5 - color_min = survey_median - 1.5 * survey_std - color_max = survey_median + 1.5 * survey_std - increment = (color_max - color_min) / float(n_ticks) - ticks = np.arange(color_min + increment, color_max, increment) - - # full skymap - hp.mollview( - hp.alm2map(alm, nside=nside, lmax=lmax) + survey_median, - flip="astro", - rot=(0, 0, 0), - min=color_min, - max=color_max, - title="", - cbar=False, - ) - hp.graticule(dpar=20, dmer=20, verbose=False) - plt.title("Full Map") - - ax = plt.gca() - im = ax.get_images()[0] - - fig = plt.gcf() - cbaxes = fig.add_axes([0.1, 0.015, 0.8, 0.04]) # [left, bottom, width, height] - cb = plt.colorbar(im, orientation="horizontal", format="%.2f", ticks=ticks, cax=cbaxes) - cb.set_label("$%s$-band Coadded Depth" % filterband) - filename = "alm_FullMap_%s.png" % (dither) - plt.savefig( - "%s%s/%s/%s/%s" % (path, out_dir, out_dir2, out_dir3, filename), - format="png", - bbox_inches="tight", - ) - - # full cartview - hp.cartview( - hp.alm2map(alm, nside=nside, lmax=lmax) + survey_median, - lonra=ra_range, - latra=dec_range, - flip="astro", - min=color_min, - max=color_max, - title="", - cbar=False, - ) - hp.graticule(dpar=20, dmer=20, verbose=False) - plt.title("Full Map") - ax = plt.gca() - im = ax.get_images()[0] - fig = plt.gcf() - cbaxes = fig.add_axes([0.1, -0.05, 0.8, 0.04]) # [left, bottom, width, height] - cb = plt.colorbar(im, orientation="horizontal", format="%.2f", ticks=ticks, cax=cbaxes) - cb.set_label("$%s$-band Coadded Depth" % filterband) - filename = "alm_Cartview_FullMap_%s.png" % (dither) - plt.savefig( - "%s%s/%s/%s/%s" % (path, out_dir, out_dir2, out_dir4, filename), - format="png", - bbox_inches="tight", - ) - - # prepare for the skymaps for l-range subsets - color_min = survey_median - 0.1 * survey_std - color_max = survey_median + 0.1 * survey_std - increment = (color_max - color_min) / float(n_ticks) - increment = 1.15 * increment - ticks = np.arange(color_min + increment, color_max, increment) - - # ------------------------------------------------------------------------ - # consider each l-range - for case in list(lsubsets.keys()): - index = [] - low_lim = subsets_to_consider[case][0] - up_lim = subsets_to_consider[case][1] - for ll in np.arange(low_lim, up_lim + 1): - for mm in np.arange(0, ll + 1): - index.append(hp.Alm.getidx(lmax=lmax, l=ll, m=mm)) - alms1 = alm.copy() - alms1.fill(0) - alms1[index] = alm[index] # an unmasked array - - # plot the skymap - hp.mollview( - hp.alm2map(alms1, nside=nside, lmax=lmax) + survey_median, - flip="astro", - rot=(0, 0, 0), - 
min=color_min, - max=color_max, - title="", - cbar=False, - ) - hp.graticule(dpar=20, dmer=20, verbose=False) - plt.title("%s<$\ell$<%s" % (low_lim, up_lim)) - ax = plt.gca() - im = ax.get_images()[0] - fig = plt.gcf() - cbaxes = fig.add_axes([0.1, 0.015, 0.8, 0.04]) # [left, bottom, width, height] - cb = plt.colorbar(im, orientation="horizontal", format="%.3f", ticks=ticks, cax=cbaxes) - cb.set_label("$%s$-band Coadded Depth" % filterband) - filename = "almSkymap_%s 1 - plt.plot(ell, (spec * ell * (ell + 1)) / 2.0 / np.pi) - plt.title("Photometric Calibration Error: %s" % dither) - plt.xlabel(r"$\ell$") - plt.ylabel(r"$\ell(\ell+1)C_\ell/(2\pi)$") - plt.xlim(0, 500) - - if save0pt_plots: - # save power spectrum - filename = "0ptPowerSpectrum_%s.png" % (dither) - plt.savefig( - "%s%s/%s/%s" % (path, out_dir, out_dir_new, filename), - bbox_inches="tight", - format="png", - ) - - if show0pt_plots: - plt.show() - else: - plt.close() - - print( - "\n## Time since the start of the calculation: %.2f hrs" % ((time.time() - start_time) / 3600.0) - ) - - # ------------------------------------------------------------------------ - # Now recalculate the num_gal with the fluctuations in depth due to calibation uncertainties. - print("\n# Recalculating num_gal including 0pt errors on the upper mag limit .. ") - for dither in my_bundles: - zero_pt_err = zero_pt_error[dither].copy() - in_survey = np.where(my_bundles[dither].metricValues.mask == False)[ - 0 - ] # 04/27: only look at in_survey region - for i in in_survey: # 4/27 - if zero_pt_err[i] != -500: # run only when zeroPt was calculated - my_bundles[dither].metricValues.data[i] = GalaxyCounts_0ptErrors( - coadd_bundle[dither].metricValues.data[i], - upper_mag_limit + zero_pt_err[i], - redshift_bin=redshift_bin, - filter_band=filter_band, - nside=nside, - cfhtls_counts=cfhtls_counts, - normalized_mock_catalog_counts=normalized_mock_catalog_counts, - ) - # ------------------------------------------------------------------------ - - # save the raw num_gal data. - if save_num_gal_data_after0pt: - out_dir_new = "numGalData_afterBorderMasking_after0pt" - for b in my_bundles: - my_bundles[b].write(out_dir=out_dir_new) - - # ------------------------------------------------------------------------ - # print out tot(num_gal) associated with each strategy - # add to the read me as well - update = "\n# After 0pt error calculation and border masking: " - print(update) - for dither in my_bundles: - ind = np.where(my_bundles[dither].metricValues.mask[:] == False)[0] - print_out = "Total Galaxies for %s: %.9e" % ( - dither, - sum(my_bundles[dither].metricValues.data[ind]), - ) - update += "\n %s" % print_out - print(print_out) - update += "\n" - readme = open("%s%s/%s" % (path, out_dir, readme_name), "a") - readme.write(update) - readme.close() - - print("\n## Time since the start of the calculation: %.2f hrs" % ((time.time() - start_time) / 3600.0)) - - ######################################################################################################### - # add poisson noise? - if add_poisson_noise: - print("\n# adding poisson noise to num_gal ... ") - for dither in my_bundles: - # make sure the values are valid; sometimes metric leaves negative numbers or nan values. 
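The power-spectrum step that recurs throughout these (now removed) scripts is a plain healpy anafast call followed by the usual l(l+1)C_l/(2*pi) scaling; a minimal sketch with an invented map:

import healpy as hp
import numpy as np

# Invented Gaussian map standing in for a masked deltaN/N fluctuation skymap.
nside, lmax = 128, 500
rng = np.random.default_rng(0)
sky = rng.normal(0.0, 0.01, hp.nside2npix(nside))

cl = hp.anafast(sky, lmax=lmax)         # angular power spectrum C_l
ell = np.arange(cl.size)
scaled = ell * (ell + 1) * cl / (2.0 * np.pi)  # the plotted convention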
- out_of_survey = np.where(my_bundles[dither].metricValues.mask == True)[0] - my_bundles[dither].metricValues.data[out_of_survey] = 0.0 - - in_survey = np.where(my_bundles[dither].metricValues.mask == False)[0] - j = np.where(my_bundles[dither].metricValues.data[in_survey] < 1.0)[0] - my_bundles[dither].metricValues.data[in_survey][j] = 0.0 - - noisy_num_gal = np.random.poisson(lam=my_bundles[dither].metricValues.data, size=None) - my_bundles[dither].metricValues.data[:] = noisy_num_gal - # ------------------------------------------------------------------------ - - # save the num_gal data. - if saveNumGalDataAfterPoisson: - out_dir_new = "numGalData_afterBorderMasking_after0pt_afterPoisson" - for b in my_bundles: - my_bundles[b].write(out_dir=out_dir_new) - - # ------------------------------------------------------------------------ - # print out tot(num_gal) associated with each strategy - # add to the read me as well - update = "\n# After adding poisson noise: " - print(update) - for dither in my_bundles: - ind = np.where(my_bundles[dither].metricValues.mask[:] == False)[0] - print_out = "Total Galaxies for %s: %.9e" % ( - dither, - sum(my_bundles[dither].metricValues.data[ind]), - ) - update += "\n %s" % print_out - print(print_out) - update += "\n" - readme = open("%s%s/%s" % (path, out_dir, readme_name), "a") - readme.write(update) - readme.close() - - print("\n## Time since the start of the calculation: %.2f hrs" % ((time.time() - start_time) / 3600.0)) - ######################################################################################################### - plot_handler = plots.PlotHandler( - out_dir="%s%s" % (path, out_dir), - results_db=results_db, - thumbnail=False, - savefig=False, - ) - print("\n# Calculating fluctuations in the galaxy counts ...") - # Change num_gal metric data to deltaN/N - num_gal = {} - # add to readme too - update = "\n" - for dither in my_bundles: - # zero out small/nan entries --- problem: should really be zeroed out by the metric *** - j = np.where(np.isnan(my_bundles[dither].metricValues.data) == True)[0] - my_bundles[dither].metricValues.data[j] = 0.0 - j = np.where(my_bundles[dither].metricValues.data < 1.0)[0] - my_bundles[dither].metricValues.data[j] = 0.0 - # calculate the fluctuations - num_gal[dither] = my_bundles[ - dither - ].metricValues.data.copy() # keep track of num_gal for plotting purposes - valid_pixel = np.where(my_bundles[dither].metricValues.mask == False)[0] - galaxy_average = sum(num_gal[dither][valid_pixel]) / len(valid_pixel) - - # in place calculation of the fluctuations - my_bundles[dither].metricValues.data[:] = 0.0 - my_bundles[dither].metricValues.data[valid_pixel] = ( - num_gal[dither][valid_pixel] - galaxy_average - ) / galaxy_average - print_out = "# Galaxy Average for %s: %s" % (dither, galaxy_average) - print(print_out) - update += "%s\n" % print_out - - readme = open("%s%s/%s" % (path, out_dir, readme_name), "a") - readme.write(update) - readme.close() - - # ------------------------------------------------------------------------ - # save the deltaN/N data - if save_delta_n_by_n_data: - out_dir_new = "deltaNByNData" - for b in my_bundles: - my_bundles[b].write(out_dir=out_dir_new) - - # ------------------------------------------------------------------------ - # Calculate total power - # add to the read me as well - summarymetric = metrics.TotalPowerMetric() - update = "" - for dither in my_bundles: - my_bundles[dither].set_summary_metrics(summarymetric) - my_bundles[dither].compute_summary_stats() - print_out = "# 
Total power for %s case is %f." % ( - dither, - my_bundles[dither].summary_values["TotalPower"], - ) - print(print_out) - update += "\n%s" % (print_out) - update += "\n" - - readme = open("%s%s/%s" % (path, out_dir, readme_name), "a") - readme.write(update) - readme.close() - # ------------------------------------------------------------------------ - # calculate the power spectra - cl = {} - for dither in my_bundles: - cl[dither] = hp.anafast( - my_bundles[dither].metricValues.filled(my_bundles[dither].slicer.badval), - lmax=500, - ) - # save deltaN/N spectra? - if save_cls_for_delta_n_by_n: - out_dir_new = "cls_DeltaByN" - if not os.path.exists("%s%s/%s" % (path, out_dir, out_dir_new)): - os.makedirs("%s%s/%s" % (path, out_dir, out_dir_new)) - - for dither in my_bundles: - filename = "cls_deltaNByN_%s_%s" % (filter_band, dither) - np.save("%s%s/%s/%s" % (path, out_dir, out_dir_new, filename), cl[dither]) - - ########################################################################################################## - # Plots for the fluctuations: power spectra, histogram - if len(list(my_bundles.keys())) > 1: - out_dir_new = "artificialFluctuationsComparisonPlots" - if not os.path.exists("%s%s/%s" % (path, out_dir, out_dir_new)): - os.makedirs("%s%s/%s" % (path, out_dir, out_dir_new)) - # ------------------------------------------------------------------------ - # power spectra - for dither in my_bundles: - ell = np.arange(np.size(cl[dither])) - condition = ell > 1 - plt.plot( - ell, - (cl[dither] * ell * (ell + 1)) / 2.0 / np.pi, - color=plot_color[dither], - linestyle="-", - label=dither, - ) - plt.xlabel(r"$\ell$") - plt.ylabel(r"$\ell(\ell+1)C_\ell/(2\pi)$") - plt.xlim(0, 500) - leg = plt.legend(labelspacing=0.001) - for legobj in leg.legendHandles: - legobj.set_linewidth(2.0) - plt.savefig( - "%s%s/%s/powerspectrum_comparison.png" % (path, out_dir, out_dir_new), - format="png", - ) - if show_comp_plots: - plt.show() - else: - plt.close("all") - # ------------------------------------------------------------------------ - # create the histogram - scale = hp.nside2pixarea(nside, degrees=True) - - def tick_formatter(y, pos): - return "%d" % (y * scale) # convert pixel count to area - - for dither in my_bundles: - ind = np.where(my_bundles[dither].metricValues.mask == False)[0] - bin_size = 0.01 - bin_all = int( - ( - max(my_bundles[dither].metricValues.data[ind]) - - min(my_bundles[dither].metricValues.data[ind]) - ) - / bin_size - ) - plt.hist( - my_bundles[dither].metricValues.data[ind], - bins=bin_all, - label=dither, - histtype="step", - color=plot_color[dither], - ) - # plt.xlim(-0.6,1.2) - ax = plt.gca() - ymin, ymax = ax.get_ylim() - n_yticks = 10.0 - wanted_y_max = ymax * scale - wanted_y_max = 10.0 * np.ceil(float(wanted_y_max) / 10.0) - increment = 5.0 * np.ceil(float(wanted_y_max / n_yticks) / 5.0) - wanted_array = np.arange(0, wanted_y_max, increment) - ax.yaxis.set_ticks(wanted_array / scale) - ax.yaxis.set_major_formatter(FuncFormatter(tick_formatter)) - plt.xlabel(r"$\mathrm{\Delta N/\overline{N}}$") - plt.ylabel("Area (deg$^2$)") - leg = plt.legend(labelspacing=0.001, bbox_to_anchor=(1, 1)) - for legobj in leg.legendHandles: - legobj.set_linewidth(2.0) - plt.savefig( - "%s%s/%s/histogram_comparison.png" % (path, out_dir, out_dir_new), - bbox_inches="tight", - format="png", - ) - if show_comp_plots: - plt.show() - else: - plt.close("all") - - # now remove the results db object -- useless - os.remove("%s%s/%s" % (path, out_dir, results_dbname)) - print("Removed %s from 
out_dir" % (results_dbname)) - - # all done. final update. - update = "\n## All done. Time since the start of the calculation: %.2f hrs" % ( - (time.time() - start_time) / 3600.0 - ) - print(update) - readme = open("%s%s/%s" % (path, out_dir, readme_name), "a") - readme.write(update) - readme.close() - - if return_stuff: - if include0pt_errors: - return my_bundles, out_dir, results_db, zero_pt_error - else: - return my_bundles, out_dir, results_db diff --git a/rubin_sim/maf/maf_contrib/lss_obs_strategy/coadd_m5_analysis.py b/rubin_sim/maf/maf_contrib/lss_obs_strategy/coadd_m5_analysis.py deleted file mode 100644 index 9e58b1761..000000000 --- a/rubin_sim/maf/maf_contrib/lss_obs_strategy/coadd_m5_analysis.py +++ /dev/null @@ -1,644 +0,0 @@ -##################################################################################################### -# Purpose: calculate the coadded 5-sigma depth from various survey strategies. Incudes functionality -# to consider various survey strategies, mask shallow borders, create/save/show relevant plots, do -# an alm analysis, and save data. - -__all__ = ("coadd_m5_analysis",) - -import copy -import os - -import healpy as hp -import matplotlib.pyplot as plt - -# Humna Awan: humna.awan@rutgers.edu -##################################################################################################### -import numpy as np -from matplotlib.ticker import FuncFormatter, MaxNLocator - -import rubin_sim.maf.db as db -import rubin_sim.maf.maps as maps -import rubin_sim.maf.metric_bundles as metricBundles -import rubin_sim.maf.metrics as metrics -import rubin_sim.maf.plots as plots -import rubin_sim.maf.slicers as slicers -import rubin_sim.maf.stackers as mafStackers # stackers in sims_maf -from rubin_sim.maf.maf_contrib.lss_obs_strategy.alm_plots import alm_plots -from rubin_sim.maf.maf_contrib.lss_obs_strategy.constants_for_pipeline import plot_color -from rubin_sim.maf.maf_contrib.lss_obs_strategy.masking_algorithm_generalized import ( - masking_algorithm_generalized, -) - - -def coadd_m5_analysis( - path, - dbfile, - run_name, - slair=False, - wf_dand_dd_fs=False, - no_dith_only=False, - best_dith_only=False, - some_dith_only=False, - specified_dith=None, - nside=128, - filter_band="r", - include_dust_extinction=False, - saveun_masked_coadd_data=False, - pixel_radius_for_masking=5, - cut_off_year=None, - plot_skymap=True, - plot_cartview=True, - unmasked_color_min=None, - unmasked_color_max=None, - masked_color_min=None, - masked_color_max=None, - n_ticks=5, - plot_power_spectrum=True, - show_plots=True, - save_figs=True, - alm_analysis=True, - ra_range=[-50, 50], - dec_range=[-65, 5], - save_masked_coadd_data=True, -): - """ - - Analyze the artifacts induced in the coadded 5sigma depth due to imperfect observing strategy. - - Creates an output directory for subdirectories containing the specified things to save. - - Creates, shows, and saves comparison plots. - - Returns the metricBundle object containing the calculated coadded depth, and the output directory name. - - Parameters - ---------- - path : str - path to the main directory where output directory is to be saved. - dbfile : str - path to the OpSim output file, e.g. to a copy of enigma_1189 - run_name : str - run name tag to identify the output of specified OpSim output, e.g. 'enigma1189' - slair : `bool` - set to True if analysis on a SLAIR output. - Default: False - wf_dand_dd_fs : `bool` - set to True if want to consider both WFD survet and DDFs. Otherwise will only work - with WFD. 
Default: False - no_dith_only : `bool` - set to True if only want to consider the undithered survey. Default: False - best_dith_only : `bool` - set to True if only want to consider RandomDitherFieldPerVisit. - Default: False - some_dith_only : `bool` - set to True if only want to consider undithered and a few dithered surveys. - Default: False - specified_dith : str - specific dither strategy to run. - Default: None - nside : int - HEALpix resolution parameter. Default: 128 - filter_band : str - any one of 'u', 'g', 'r', 'i', 'z', 'y'. Default: 'r' - include_dust_extinction : `bool` - set to include dust extinction. Default: False - saveun_masked_coadd_data : `bool` - set to True to save data before border masking. Default: False - pixel_radius_for_masking : int - number of pixels to mask along the shallow border. Default: 5 - cut_off_year : int - year cut to restrict analysis to only a subset of the survey. - Must range from 1 to 9, or None for the full survey analysis (10 yrs). - Default: None - plot_skymap : `bool` - set to True if want to plot skymaps. Default: True - plot_cartview : `bool` - set to True if want to plot cartview plots. Default: False - unmasked_color_min : float - lower limit on the colorscale for unmasked skymaps. Default: None - unmasked_color_max : float - upper limit on the colorscale for unmasked skymaps. Default: None - masked_color_min : float - lower limit on the colorscale for border-masked skymaps. Default: None - masked_color_max : float - upper limit on the colorscale for border-masked skymaps. Default: None - n_ticks : int - (number of ticks - 1) on the skymap colorbar. Default: 5 - plot_power_spectrum : `bool` - set to True if want to plot powerspectra. Default: True - show_plots : `bool` - set to True if want to show figures. Default: True - save_figs : `bool` - set to True if want to save figures. Default: True - alm_analysis : `bool` - set to True to perform the alm analysis. Default: True - ra_range : float array - range of right ascention (in degrees) to consider in alm cartview plot; - applicable when alm_analysis=True. Default: [-50,50] - dec_range : float array - range of declination (in degrees) to consider in alm cartview plot; - applicable when alm_analysis=True. Default: [-65,5] - save_masked_coadd_data : `bool` - set to True to save the coadded depth data after the border - masking. 
Default: True - """ - # ------------------------------------------------------------------------ - # read in the database - if slair: - # slair database - opsdb = db.Database(dbfile, defaultTable="observations") - else: - # OpSim database - opsdb = db.OpsimDatabase(dbfile) - - # ------------------------------------------------------------------------ - # set up the out_dir - zeropt_tag = "" - if cut_off_year is not None: - zeropt_tag = "%syearCut" % cut_off_year - else: - zeropt_tag = "fullSurveyPeriod" - - if include_dust_extinction: - dust_tag = "withDustExtinction" - else: - dust_tag = "noDustExtinction" - - region_type = "" - if wf_dand_dd_fs: - region_type = "WFDandDDFs_" - - out_dir = "coaddM5Analysis_%snside%s_%s_%spixelRadiusForMasking_%sBand_%s_%s_directory" % ( - region_type, - nside, - dust_tag, - pixel_radius_for_masking, - filter_band, - run_name, - zeropt_tag, - ) - print("# out_dir: %s" % out_dir) - results_db = db.ResultsDb(out_dir=out_dir) - - # ------------------------------------------------------------------------ - # set up the sql constraint - if wf_dand_dd_fs: - if cut_off_year is not None: - night_cut_off = (cut_off_year) * 365.25 - sqlconstraint = 'night<=%s and filter=="%s"' % (night_cut_off, filter_band) - else: - sqlconstraint = 'filter=="%s"' % filter_band - else: - # set up the propID and units on the ra, dec - if slair: # no prop ID; only WFD is simulated. - wfd_where = "" - ra_dec_in_deg = True - else: - prop_ids, prop_tags = opsdb.fetchPropInfo() - wfd_where = "%s and " % opsdb.createSQLWhere("WFD", prop_tags) - ra_dec_in_deg = opsdb.raDecInDeg - # set up the year cutoff - if cut_off_year is not None: - night_cut_off = (cut_off_year) * 365.25 - sqlconstraint = '%snight<=%s and filter=="%s"' % ( - wfd_where, - night_cut_off, - filter_band, - ) - else: - sqlconstraint = '%sfilter=="%s"' % (wfd_where, filter_band) - print("# sqlconstraint: %s" % sqlconstraint) - - # ------------------------------------------------------------------------ - # setup all the slicers - slicer = {} - stacker_list = {} - - if ( - specified_dith is not None - ): # would like to add all the stackers first and then keep only the one that is specified - best_dith_only, no_dith_only = False, False - - if best_dith_only: - stacker_list["RandomDitherFieldPerVisit"] = [ - mafStackers.RandomDitherFieldPerVisitStacker(degrees=ra_dec_in_deg, random_seed=1000) - ] - slicer["RandomDitherFieldPerVisit"] = slicers.HealpixSlicer( - lon_col="randomDitherFieldPerVisitRa", - lat_col="randomDitherFieldPerVisitDec", - lat_lon_deg=ra_dec_in_deg, - nside=nside, - use_cache=False, - ) - else: - if slair: - slicer["NoDither"] = slicers.HealpixSlicer( - lon_col="RA", - lat_col="dec", - lat_lon_deg=ra_dec_in_deg, - nside=nside, - use_cache=False, - ) - else: - slicer["NoDither"] = slicers.HealpixSlicer( - lon_col="fieldRA", - lat_col="fieldDec", - lat_lon_deg=ra_dec_in_deg, - nside=nside, - use_cache=False, - ) - if some_dith_only and not no_dith_only: - # stacker_list['RepulsiveRandomDitherFieldPerVisit'] = [myStackers.RepulsiveRandomDitherFieldPerVisitStacker(degrees=ra_dec_in_deg, - # random_seed=1000)] - # slicer['RepulsiveRandomDitherFieldPerVisit'] = slicers.HealpixSlicer(lonCol='repulsiveRandomDitherFieldPerVisitRa', - # latCol='repulsiveRandomDitherFieldPerVisitDec', - # latLonDeg=ra_dec_in_deg, nside=nside, - # use_cache=False) - slicer["SequentialHexDitherFieldPerNight"] = slicers.HealpixSlicer( - lon_col="hexDitherFieldPerNightRa", - lat_col="hexDitherFieldPerNightDec", - lat_lon_deg=ra_dec_in_deg, 
- nside=nside, - use_cache=False, - ) - slicer["PentagonDitherPerSeason"] = slicers.HealpixSlicer( - lon_col="pentagonDitherPerSeasonRa", - lat_col="pentagonDitherPerSeasonDec", - lat_lon_deg=ra_dec_in_deg, - nside=nside, - use_cache=False, - ) - elif not no_dith_only: - # random dithers on different timescales - stacker_list["RandomDitherPerNight"] = [ - mafStackers.RandomDitherPerNightStacker(degrees=ra_dec_in_deg, random_seed=1000) - ] - stacker_list["RandomDitherFieldPerNight"] = [ - mafStackers.RandomDitherFieldPerNightStacker(degrees=ra_dec_in_deg, random_seed=1000) - ] - stacker_list["RandomDitherFieldPerVisit"] = [ - mafStackers.RandomDitherFieldPerVisitStacker(degrees=ra_dec_in_deg, random_seed=1000) - ] - - # rep random dithers on different timescales - # stacker_list['RepulsiveRandomDitherPerNight'] = [myStackers.RepulsiveRandomDitherPerNightStacker(degrees=ra_dec_in_deg, - # random_seed=1000)] - # stacker_list['RepulsiveRandomDitherFieldPerNight'] = [myStackers.RepulsiveRandomDitherFieldPerNightStacker(degrees=ra_dec_in_deg, - # random_seed=1000)] - # stacker_list['RepulsiveRandomDitherFieldPerVisit'] = [myStackers.RepulsiveRandomDitherFieldPerVisitStacker(degrees=ra_dec_in_deg, - # random_seed=1000)] - # set up slicers for different dithers - # random dithers on different timescales - slicer["RandomDitherPerNight"] = slicers.HealpixSlicer( - lon_col="randomDitherPerNightRa", - lat_col="randomDitherPerNightDec", - lat_lon_deg=ra_dec_in_deg, - nside=nside, - use_cache=False, - ) - slicer["RandomDitherFieldPerNight"] = slicers.HealpixSlicer( - lon_col="randomDitherFieldPerNightRa", - lat_col="randomDitherFieldPerNightDec", - lat_lon_deg=ra_dec_in_deg, - nside=nside, - use_cache=False, - ) - slicer["RandomDitherFieldPerVisit"] = slicers.HealpixSlicer( - lon_col="randomDitherFieldPerVisitRa", - lat_col="randomDitherFieldPerVisitDec", - lat_lon_deg=ra_dec_in_deg, - nside=nside, - use_cache=False, - ) - # rep random dithers on different timescales - # slicer['RepulsiveRandomDitherPerNight'] = slicers.HealpixSlicer(lonCol='repulsiveRandomDitherPerNightRa', - # latCol='repulsiveRandomDitherPerNightDec', - # latLonDeg=ra_dec_in_deg, nside=nside, use_cache=False) - # slicer['RepulsiveRandomDitherFieldPerNight'] = slicers.HealpixSlicer(lonCol='repulsiveRandomDitherFieldPerNightRa', - # latCol='repulsiveRandomDitherFieldPerNightDec', - # latLonDeg=ra_dec_in_deg, nside=nside, - # use_cache=False) - # slicer['RepulsiveRandomDitherFieldPerVisit'] = slicers.HealpixSlicer(lonCol='repulsiveRandomDitherFieldPerVisitRa', - # latCol='repulsiveRandomDitherFieldPerVisitDec', - # latLonDeg=ra_dec_in_deg, nside=nside, - # use_cache=False) - # spiral dithers on different timescales - slicer["FermatSpiralDitherPerNight"] = slicers.HealpixSlicer( - lon_col="fermatSpiralDitherPerNightRa", - lat_col="fermatSpiralDitherPerNightDec", - lat_lon_deg=ra_dec_in_deg, - nside=nside, - use_cache=False, - ) - slicer["FermatSpiralDitherFieldPerNight"] = slicers.HealpixSlicer( - lon_col="fermatSpiralDitherFieldPerNightRa", - lat_col="fermatSpiralDitherFieldPerNightDec", - lat_lon_deg=ra_dec_in_deg, - nside=nside, - use_cache=False, - ) - slicer["FermatSpiralDitherFieldPerVisit"] = slicers.HealpixSlicer( - lon_col="fermatSpiralDitherFieldPerVisitRa", - lat_col="fermatSpiralDitherFieldPerVisitDec", - lat_lon_deg=ra_dec_in_deg, - nside=nside, - use_cache=False, - ) - # hex dithers on different timescales - slicer["SequentialHexDitherPerNight"] = slicers.HealpixSlicer( - lon_col="hexDitherPerNightRa", - 
lat_col="hexDitherPerNightDec", - lat_lon_deg=ra_dec_in_deg, - nside=nside, - use_cache=False, - ) - slicer["SequentialHexDitherFieldPerNight"] = slicers.HealpixSlicer( - lon_col="hexDitherFieldPerNightRa", - lat_col="hexDitherFieldPerNightDec", - lat_lon_deg=ra_dec_in_deg, - nside=nside, - use_cache=False, - ) - slicer["SequentialHexDitherFieldPerVisit"] = slicers.HealpixSlicer( - lon_col="hexDitherFieldPerVisitRa", - lat_col="hexDitherFieldPerVisitDec", - lat_lon_deg=ra_dec_in_deg, - nside=nside, - use_cache=False, - ) - # per season dithers - slicer["PentagonDitherPerSeason"] = slicers.HealpixSlicer( - lon_col="pentagonDitherPerSeasonRa", - lat_col="pentagonDitherPerSeasonDec", - lat_lon_deg=ra_dec_in_deg, - nside=nside, - use_cache=False, - ) - slicer["PentagonDiamondDitherPerSeason"] = slicers.HealpixSlicer( - lon_col="pentagonDiamondDitherPerSeasonRa", - lat_col="pentagonDiamondDitherPerSeasonDec", - lat_lon_deg=ra_dec_in_deg, - nside=nside, - use_cache=False, - ) - slicer["SpiralDitherPerSeason"] = slicers.HealpixSlicer( - lon_col="spiralDitherPerSeasonRa", - lat_col="spiralDitherPerSeasonDec", - lat_lon_deg=ra_dec_in_deg, - nside=nside, - use_cache=False, - ) - if specified_dith is not None: - stacker_list_, slicer_ = {}, {} - if specified_dith in slicer.keys(): - if specified_dith.__contains__( - "Random" - ): # only Random dithers have a stacker object for rand seed specification - stacker_list_[specified_dith] = stacker_list[specified_dith] - slicer_[specified_dith] = slicer[specified_dith] - else: - raise ValueError( - "Invalid value for specified_dith: %s. Allowed values include one of the following:\n%s" - % (specified_dith, slicer.keys()) - ) - stacker_list, slicer = stacker_list_, slicer_ - - # ------------------------------------------------------------------------ - if slair: - m5_col = "fivesigmadepth" - else: - m5_col = "fiveSigmaDepth" - # set up the metric - if include_dust_extinction: - # include dust extinction when calculating the co-added depth - coadd_metric = metrics.ExgalM5(m5_col=m5_col, lsstFilter=filter_band) - else: - coadd_metric = metrics.Coaddm5Metric(m5col=m5col) - dust_map = maps.DustMap( - interp=False, nside=nside - ) # include dust_map; actual in/exclusion of dust is handled by the galaxyCountMetric - - # ------------------------------------------------------------------------ - # set up the bundle - coadd_bundle = {} - for dither in slicer: - if dither in stacker_list: - coadd_bundle[dither] = metricBundles.MetricBundle( - coadd_metric, - slicer[dither], - sqlconstraint, - stacker_list=stacker_list[dither], - run_name=run_name, - metadata=dither, - maps_list=[dust_map], - ) - else: - coadd_bundle[dither] = metricBundles.MetricBundle( - coadd_metric, - slicer[dither], - sqlconstraint, - run_name=run_name, - metadata=dither, - maps_list=[dust_map], - ) - - # ------------------------------------------------------------------------ - # run the analysis - if include_dust_extinction: - print("\n# Running coadd_bundle with dust extinction ...") - else: - print("\n# Running coadd_bundle without dust extinction ...") - c_group = metricBundles.MetricBundleGroup( - coadd_bundle, opsdb, out_dir=out_dir, results_db=results_db, save_early=False - ) - c_group.run_all() - - # ------------------------------------------------------------------------ - plot_handler = plots.PlotHandler(out_dir=out_dir, results_db=results_db, thumbnail=False, savefig=False) - - print("# Number of pixels in the survey region (before masking the border):") - for dither in coadd_bundle: - 
print( - " %s: %s" - % ( - dither, - len(np.where(coadd_bundle[dither].metricValues.mask == False)[0]), - ) - ) - - # ------------------------------------------------------------------------ - # save the unmasked data? - if saveun_masked_coadd_data: - out_dir_new = "unmaskedCoaddData" - for b in coadd_bundle: - coadd_bundle[b].write(out_dir=out_dir_new) - - # ------------------------------------------------------------------------ - # mask the edges - print("\n# Masking the edges for coadd ...") - coadd_bundle = masking_algorithm_generalized( - coadd_bundle, - plot_handler, - data_label="$%s$-band Coadded Depth" % filter_band, - nside=nside, - pixel_radius=pixel_radius_for_masking, - plot_intermediate_plots=False, - plot_final_plots=False, - print_final_info=True, - ) - # ------------------------------------------------------------------------ - # Calculate total power - summarymetric = metrics.TotalPowerMetric() - for dither in coadd_bundle: - coadd_bundle[dither].set_summary_metrics(summarymetric) - coadd_bundle[dither].compute_summary_stats() - print( - "# Total power for %s case is %f." % (dither, coadd_bundle[dither].summary_values["TotalPower"]) - ) - print("") - - # ------------------------------------------------------------------------ - # run the alm analysis - if alm_analysis: - alm_plots( - path, - out_dir, - copy.deepcopy(coadd_bundle), - nside=nside, - filterband=filter_band, - ra_range=ra_range, - dec_range=dec_range, - show_plots=show_plots, - ) - # ------------------------------------------------------------------------ - # save the masked data? - if save_masked_coadd_data and (pixel_radius_for_masking > 0): - out_dir_new = "maskedCoaddData" - for b in coadd_bundle: - coadd_bundle[b].write(out_dir=out_dir_new) - - # ------------------------------------------------------------------------ - # plot comparison plots - if len(coadd_bundle.keys()) > 1: # more than one key - # set up the directory - out_dir_comp = "coaddM5ComparisonPlots" - if not os.path.exists("%s%s/%s" % (path, out_dir, out_dir_comp)): - os.makedirs("%s%s/%s" % (path, out_dir, out_dir_comp)) - # ------------------------------------------------------------------------ - # plot for the power spectra - cl = {} - for dither in plot_color: - if dither in coadd_bundle: - cl[dither] = hp.anafast( - hp.remove_dipole( - coadd_bundle[dither].metricValues.filled(coadd_bundle[dither].slicer.badval) - ), - lmax=500, - ) - ell = np.arange(np.size(cl[dither])) - plt.plot( - ell, - (cl[dither] * ell * (ell + 1)) / 2.0 / np.pi, - color=plot_color[dither], - linestyle="-", - label=dither, - ) - plt.xlabel(r"$\ell$") - plt.ylabel(r"$\ell(\ell+1)C_\ell/(2\pi)$") - plt.xlim(0, 500) - fig = plt.gcf() - fig.set_size_inches(12.5, 10.5) - leg = plt.legend(labelspacing=0.001) - for legobj in leg.legendHandles: - legobj.set_linewidth(4.0) - filename = "powerspectrum_comparison_all.png" - plt.savefig( - "%s%s/%s/%s" % (path, out_dir, out_dir_comp, filename), - bbox_inches="tight", - format="png", - ) - plt.show() - - # create the histogram - scale = hp.nside2pixarea(nside, degrees=True) - - def tick_formatter(y, pos): - return "%d" % (y * scale) # convert pixel count to area - - bin_size = 0.01 - for dither in plot_color: - if dither in coadd_bundle: - ind = np.where(coadd_bundle[dither].metricValues.mask == False)[0] - bin_all = int( - ( - max(coadd_bundle[dither].metricValues.data[ind]) - - min(coadd_bundle[dither].metricValues.data[ind]) - ) - / bin_size - ) - plt.hist( - coadd_bundle[dither].metricValues.data[ind], - bins=bin_all, - 
label=dither, - histtype="step", - color=plot_color[dither], - ) - ax = plt.gca() - ymin, ymax = ax.get_ylim() - n_yticks = 10.0 - wanted_y_max = ymax * scale - wanted_y_max = 10.0 * np.ceil(float(wanted_y_max) / 10.0) - increment = 5.0 * np.ceil(float(wanted_y_max / n_yticks) / 5.0) - wanted_array = np.arange(0, wanted_y_max, increment) - ax.yaxis.set_ticks(wanted_array / scale) - ax.yaxis.set_major_formatter(FuncFormatter(tick_formatter)) - plt.xlabel("$%s$-band Coadded Depth" % filter_band) - plt.ylabel("Area (deg$^2$)") - fig = plt.gcf() - fig.set_size_inches(12.5, 10.5) - leg = plt.legend(labelspacing=0.001, loc=2) - for legobj in leg.legendHandles: - legobj.set_linewidth(2.0) - filename = "histogram_comparison.png" - plt.savefig( - "%s%s/%s/%s" % (path, out_dir, out_dir_comp, filename), - bbox_inches="tight", - format="png", - ) - plt.show() - # ------------------------------------------------------------------------ - # plot power spectra for the separte panel - tot_keys = len(list(coadd_bundle.keys())) - if tot_keys > 1: - plt.clf() - n_cols = 2 - n_rows = int(np.ceil(float(tot_keys) / n_cols)) - fig, ax = plt.subplots(n_rows, n_cols) - plot_row = 0 - plot_col = 0 - for dither in list(plot_color.keys()): - if dither in list(coadd_bundle.keys()): - ell = np.arange(np.size(cl[dither])) - ax[plot_row, plot_col].plot( - ell, - (cl[dither] * ell * (ell + 1)) / 2.0 / np.pi, - color=plot_color[dither], - label=dither, - ) - if plot_row == n_rows - 1: - ax[plot_row, plot_col].set_xlabel(r"$\ell$") - ax[plot_row, plot_col].set_ylabel(r"$\ell(\ell+1)C_\ell/(2\pi)$") - ax[plot_row, plot_col].yaxis.set_major_locator(MaxNLocator(3)) - if dither != "NoDither": - ax[plot_row, plot_col].set_ylim(0, 0.0035) - ax[plot_row, plot_col].set_xlim(0, 500) - plot_row += 1 - if plot_row > n_rows - 1: - plot_row = 0 - plot_col += 1 - fig.set_size_inches(20, int(n_rows * 30 / 7.0)) - filename = "powerspectrum_sepPanels.png" - plt.savefig( - "%s%s/%s/%s" % (path, out_dir, out_dir_comp, filename), - bbox_inches="tight", - format="png", - ) - plt.show() - return coadd_bundle, out_dir diff --git a/rubin_sim/maf/maf_contrib/lss_obs_strategy/constants_for_pipeline.py b/rubin_sim/maf/maf_contrib/lss_obs_strategy/constants_for_pipeline.py index dfd6f1125..125655506 100644 --- a/rubin_sim/maf/maf_contrib/lss_obs_strategy/constants_for_pipeline.py +++ b/rubin_sim/maf/maf_contrib/lss_obs_strategy/constants_for_pipeline.py @@ -1,20 +1,21 @@ -################################################################################################# -# Various things declared here to be imported when running artificialStructureCalculation. -# Makes the updates easier, since the constants/objects defined here are used by different -# functions, e.g. power law constants are called by GalaxyCountsMetric_extended as well as -# GalaxyCounts_withPixelCalibration. Similarly, plot_color dictionary helps maintain a consistent -# color scheme for different observing strategies. +# Various things declared here to be imported when running +# artificialStructureCalculation. +# Makes the updates easier, since the constants/objects defined here +# are used by different +# functions, e.g. power law constants are called by +# GalaxyCountsMetric_extended as well as +# GalaxyCounts_withPixelCalibration. # # Included here: -# * power law constants from the i-band mock catalog; based on mocks from Padilla et al. +# * power law constants from the i-band mock catalog; based on mocks +# from Padilla et al. 
# * normalization constant for galaxy counts from the mock catalogs. -# * plot_color dictionary: colors for plotting results from different observing strategies.s # # Humna Awan # humna.awan@rutgers.edu -################################################################################################# -################################################################################################# -# Power law constants for each z-bin based on N. D. Padilla et al.'s mock catalogs + +# Power law constants for each z-bin based on N. D. Padilla et al.'s +# mock catalogs # General power law form: 10**(a*m+b) # Declare the dictionary for the power law constants from collections import OrderedDict @@ -65,80 +66,8 @@ power_law_const_a["4.0 g= u-0.4 => i= u-0.4*3 + if self.filter_band == "u": + # dimmer than i: u-g= 0.4 => g= u-0.4 => i= u-0.4*3 band_correction = -0.4 * 3.0 - elif self.filter_band == "g": # dimmer than i: g-r= 0.4 => r= g-0.4 => i= g-0.4*2 + elif self.filter_band == "g": + # dimmer than i: g-r= 0.4 => r= g-0.4 => i= g-0.4*2 band_correction = -0.4 * 2.0 - elif self.filter_band == "r": # dimmer than i: i= r-0.4 + elif self.filter_band == "r": + # dimmer than i: i= r-0.4 band_correction = -0.4 - elif self.filter_band == "i": # i + elif self.filter_band == "i": + # i band_correction = 0.0 - elif self.filter_band == "z": # brighter than i: i-z= 0.4 => i= z+0.4 + elif self.filter_band == "z": + # brighter than i: i-z= 0.4 => i= z+0.4 band_correction = 0.4 - elif self.filter_band == "y": # brighter than i: z-y= 0.4 => z= y+0.4 => i= y+0.4*2 + elif self.filter_band == "y": + # brighter than i: z-y= 0.4 => z= y+0.4 => i= y+0.4*2 band_correction = 0.4 * 2.0 else: print("ERROR: Invalid band in GalaxyCountsMetricExtended. Assuming i-band.") @@ -139,7 +156,8 @@ def _gal_count(self, apparent_mag, coaddm5): # consider the power laws if self.redshift_bin == "all": if self.cfhtls_counts: - # LSST power law: eq. 3.7 from LSST Science Book converted to per sq degree: + # LSST power law: eq. 3.7 from LSST Science Book + # converted to per sq degree: # (46*3600)*10^(0.31(i-25)) dn_gal = 46.0 * 3600.0 * np.power(10.0, 0.31 * (apparent_mag + band_correction - 25.0)) else: @@ -166,13 +184,15 @@ def _gal_count(self, apparent_mag, coaddm5): def run(self, data_slice, slice_point=None): # Calculate the coadded depth. infilt = np.where(data_slice[self.filter_col] == self.filter_band)[0] - # If there are no visits in this filter, return immediately with a flagged value + # If there are no visits in this filter, + # return immediately with a flagged value if len(infilt) == 0: return self.badval coaddm5 = self.coaddmetric.run(data_slice[infilt], slice_point) - # some coaddm5 values are really small (i.e. min=10**-314). Zero them out. + # some coaddm5 values are really small (i.e. min=10**-314). + # Zero them out. if coaddm5 < 1: num_gal = 0 diff --git a/rubin_sim/maf/maf_contrib/lss_obs_strategy/galaxy_counts_with_pixel_calibration.py b/rubin_sim/maf/maf_contrib/lss_obs_strategy/galaxy_counts_with_pixel_calibration.py index 7723a79b3..f821259dd 100644 --- a/rubin_sim/maf/maf_contrib/lss_obs_strategy/galaxy_counts_with_pixel_calibration.py +++ b/rubin_sim/maf/maf_contrib/lss_obs_strategy/galaxy_counts_with_pixel_calibration.py @@ -1,16 +1,19 @@ -##################################################################################################### # Purpose: Calculate the galaxy counts for each Healpix pixel directly. 
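The band corrections and power law kept by the hunks above amount to shifting every filter onto an i-band relation; as a back-of-envelope check of the eq. 3.7 comment (a rough reading of that comment, not code from this patch):

import numpy as np

def n_gal_per_deg2(i_mag):
    # LSST Science Book eq. 3.7: roughly 46 * 10**(0.31 * (i - 25))
    # galaxies per sq arcmin brighter than i; the factor 3600 converts
    # to counts per sq degree, matching the (46*3600) factor above.
    return 46.0 * 3600.0 * np.power(10.0, 0.31 * (i_mag - 25.0))

print(n_gal_per_deg2(25.5))        # ~2.4e5 galaxies per deg^2
print(n_gal_per_deg2(25.5 - 0.4))  # same cut quoted in r band (i = r - 0.4)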
-# Necessary when accounting for pixel-specific calibration errors (as they modify the magnitude limit
+# Necessary when accounting for pixel-specific calibration errors
+# (as they modify the magnitude limit
 # to which incompleteness-corrected galaxy LF is integrated over).
 #
-# Similar to GalaxyCountsMetric_extended but does the analysis on each HEALpix pixel individually,
-# without communicating with the slicer. Like a psuedo-metric. Accomodates individual redshift
+# Similar to GalaxyCountsMetric_extended but does the analysis on each
+# HEALpix pixel individually,
+# without communicating with the slicer. Like a pseudo-metric.
+# Accommodates individual redshift
 # bins; galaxy LF powerlaws based on mock catalogs from Padilla et al.
 #
-# Need constantsForPipeline.py to import the power law constants and the normalization factor.
+# Need constantsForPipeline.py to import the power law constants
+# and the normalization factor.
 #
 # Humna Awan: humna.awan@rutgers.edu
-#####################################################################################################
+
 __all__ = ("galaxy_counts_with_pixel_calibration",)
 import warnings
@@ -35,8 +38,8 @@ def galaxy_counts_with_pixel_calibration(
 normalized_mock_catalog_counts=True,
 ):
 """
-    - Estimate galaxy counts for a given HEALpix pixel directly (without a slicer).
+    Estimate galaxy counts for a given HEALpix pixel directly
+    (without a slicer).
 Parameters
 ----------
@@ -49,8 +52,10 @@ def galaxy_counts_with_pixel_calibration(
 filter_band : `str`, opt
 Any one of 'u', 'g', 'r', 'i', 'z', 'y'. Default: 'i'
 redshift_bin : `str`, opt
-        options include '0. g= u-0.4 => i= u-0.4*3
+    if filter_band == "u":
+        # dimmer than i: u-g= 0.4 => g= u-0.4 => i= u-0.4*3
 band_correction = -0.4 * 3.0
-    elif filter_band == "g":  # dimmer than i: g-r= 0.4 => r= g-0.4 => i= g-0.4*2
+    elif filter_band == "g":
+        # dimmer than i: g-r= 0.4 => r= g-0.4 => i= g-0.4*2
 band_correction = -0.4 * 2.0
-    elif filter_band == "r":  # dimmer than i: i= r-0.4
+    elif filter_band == "r":
+        # dimmer than i: i= r-0.4
 band_correction = -0.4
-    elif filter_band == "i":  # i
+    elif filter_band == "i":
+        # i
 band_correction = 0.0
-    elif filter_band == "z":  # brighter than i: i-z= 0.4 => i= z+0.4
+    elif filter_band == "z":
+        # brighter than i: i-z= 0.4 => i= z+0.4
 band_correction = 0.4
-    elif filter_band == "y":  # brighter than i: z-y= 0.4 => z= y+0.4 => i= y+0.4*2
+    elif filter_band == "y":
+        # brighter than i: z-y= 0.4 => z= y+0.4 => i= y+0.4*2
 band_correction = 0.4 * 2.0
 else:
 print("ERROR: Invalid band in GalaxyCountsMetric_withPixelCalibErrors. Assuming i-band.")
@@ -92,7 +104,8 @@ def galaxy_counts_with_pixel_calibration(
 # check to make sure that the z-bin assigned is valid.
 if (redshift_bin != "all") and (redshift_bin not in list(power_law_const_a.keys())):
 print(
-            "ERROR: Invalid redshift bin in GalaxyCountsMetric_withPixelCalibration. Defaulting to all redshifts."
+            "ERROR: Invalid redshift bin in GalaxyCountsMetric_withPixelCalibration. "
+            "Defaulting to all redshifts."
 )
 redshift_bin = "all"
@@ -111,7 +124,8 @@ def gal_count_bin(apparent_mag, coaddm5):
 # when have to consider the entire z-range
 def gal_count_all(apparent_mag, coaddm5):
 if cfhtls_counts:
-            # LSST power law: eq. 3.7 from LSST Science Book converted to per sq degree:
+            # LSST power law: eq.
3.7 from LSST Science Book + # converted to per sq degree: # (46*3600)*10^(0.31(i-25)) dn_gal = 46.0 * 3600.0 * np.power(10.0, 0.31 * (apparent_mag + band_correction - 25.0)) else: @@ -127,7 +141,8 @@ def gal_count_all(apparent_mag, coaddm5): return dn_gal * completeness # ------------------------------------------------------------------------ - # some coaddm5 values come out really small (i.e. min= 10**-314). Zero them out. + # some coaddm5 values come out really small (i.e. min= 10**-314). + # Zero them out. if coaddm5 < 1: coaddm5 = 0 @@ -141,7 +156,8 @@ def gal_count_all(apparent_mag, coaddm5): num_gal, int_err = scipy.integrate.quad(gal_count_bin, -np.inf, upper_mag_limit, args=coaddm5) if normalized_mock_catalog_counts and not cfhtls_counts: - # Normalize the counts from mock catalogs to match up to CFHTLS counts fori<25.5 galaxy catalog + # Normalize the counts from mock catalogs to match up to + # CFHTLS counts fori<25.5 galaxy catalog # Found the scaling factor separately. num_gal = normalization_constant * num_gal diff --git a/rubin_sim/maf/maf_contrib/lss_obs_strategy/masking_algorithm_generalized.py b/rubin_sim/maf/maf_contrib/lss_obs_strategy/masking_algorithm_generalized.py deleted file mode 100644 index f4bba272a..000000000 --- a/rubin_sim/maf/maf_contrib/lss_obs_strategy/masking_algorithm_generalized.py +++ /dev/null @@ -1,244 +0,0 @@ -##################################################################################################### -# Purpose: change the values/mask of a metricBundle in the pixels with a certain value/mask. -# Example applicaton: mask the outermost/shallow edge of skymaps. -# -# Humna Awan: humna.awan@rutgers.edu -##################################################################################################### - -__all__ = ("masking_algorithm_generalized",) - -import copy - -import healpy as hp -import matplotlib.cm as cm -import matplotlib.pyplot as plt -import numpy as np - -import rubin_sim.maf.plots as plots - - -def masking_algorithm_generalized( - my_bundles, - plot_handler, - data_label, - nside=128, - find_value="unmasked", - relation="=", - new_value="masked", - pixel_radius=6, - return_border_indices=False, - print_intermediate_info=False, - plot_intermediate_plots=True, - print_final_info=True, - plot_final_plots=True, - sky_map_color_min=None, - sky_map_color_max=None, -): - """ - Assign new_value to all pixels in a skymap within pixel_radius of pixels with value <, >, or = find_value. - - Parameters - ---------- - my_bundles : `dict` {`rubin_sim.maf.MetricBundles`} - a dictionary for metricBundles. - plot_handler : `rubin_sim.maf.plots.plotHandler.PlotHandler` - data_label : `str` - description of the data, i.e. 'numGal' - nside : `int` - HEALpix resolution parameter. Default: 128 - find_value : `str` - if related to mask, must be either 'masked' or 'unmasked'. otherwise, must be a number. - Default: 'unmasked' - relation : `str` - must be '>','=','<'. Default: '=' - new_value : `str` - if related to mask, must be either 'masked' or 'unmasked'; otherwise, must be a number. - Default: 'masked' - pixel_radius : `int` - number of pixels to consider around a given pixel. Default: 6 - return_border_indices : `bool` - set to True to return the array of indices of the pixels whose values/mask are changed. Default: False - print_intermediate_info : `bool` - set to False if do not want to print intermediate info. Default: True - plot_intermediate_plots : `bool` - set to False if do not want to plot intermediate plots. 
Default: True - print_final_info : `bool` - set to False if do not want to print final info, i.e. total pixels changed. Default: True - plot_final_plots : `bool` - set to False if do not want to plot the final plots. Default: True - sky_map_color_min : float - color_min label value for skymap plot_dict label. Default: None - sky_map_color_max : float - color_max label value for skymap plot_dict label. Default: None - """ - # find pixels such that (pixelValue (relation) find_value) AND their neighbors dont have that (relation) find_value. - # then assign new_value to all these pixels. - # relation must be '>','=','<' - # data indices are the pixels numbers .. - # ------------------------------------------------------------------------ - # check whether need to mask anything at all - if pixel_radius == 0: - print("No masking/changing of the original data.") - if return_border_indices: - borders = {} - for dither in my_bundles: - borders[dither] = [] - - return [my_bundles, borders] - else: - return my_bundles - # ------------------------------------------------------------------------ - # make sure that relation is compatible with find_value - if (find_value == "masked") | (find_value == "unmasked"): - if relation != "=": - print('ERROR: must have relation== "=" if find_value is related to mask.') - print('Setting: relation= "="\n') - relation = "=" - # ------------------------------------------------------------------------ - # translate find_value into what has to be assigned - find_value_to_consider = find_value - if find_value.__contains__("mask"): - if find_value == "masked": - find_value_to_consider = True - if find_value == "unmasked": - find_value_to_consider = False - - # translate new_value into what has to be assigned - new_value_to_assign = new_value - if new_value.__contains__("mask"): - if new_value == "masked": - new_value_to_assign = True - if new_value == "unmasked": - new_value_to_assign = False - - # ------------------------------------------------------------------------ - borders = {} - for dither in my_bundles: - total_border_pixel = [] - if print_intermediate_info: - print("Survey strategy: %s" % dither) - - # find the array to look at. 
- if (find_value).__contains__("mask"): - orig_array = my_bundles[dither].metricValues.mask.copy() # mask array - else: - orig_array = my_bundles[dither].metricValues.data.copy() # data array - - for r in range(0, pixel_radius): - border_pixel = [] - temp_copy = copy.deepcopy(my_bundles) - # ignore the pixels whose neighbors formed the border in previous run - if r != 0: - orig_array[total_border_pixel] = new_value_to_assign - - # find the pixels that satisfy the relation with find_value and whose neighbors dont - for i in range(0, len(orig_array)): - neighbors_pixels = hp.get_all_neighbours(nside, i) # i is the pixel number - for j in neighbors_pixels: - condition = None - if relation == "<": - condition = (orig_array[i] < find_value_to_consider) & ( - orig_array[j] >= find_value_to_consider - ) - if relation == "=": - condition = (orig_array[i] == find_value_to_consider) & ( - orig_array[j] != find_value_to_consider - ) - if relation == ">": - condition = (orig_array[i] > find_value_to_consider) & ( - orig_array[j] <= find_value_to_consider - ) - if condition == None: - raise ValueError("ERROR: invalid relation: %s" % relation) - - if condition: - if j != -1: # -1 entries correspond to inexistent neighbors - border_pixel.append(i) - - border_pixel = np.unique(border_pixel) - total_border_pixel.extend(border_pixel) - - if print_intermediate_info: - print("Border pixels from run %s: %s" % (r + 1, len(border_pixel))) - print("Total pixels so far: %s\n" % len(total_border_pixel)) - - # plot found pixels - if plot_intermediate_plots: - if new_value.__contains__("mask"): - temp_copy[dither].metricValues.mask[:] = new_value_to_assign - temp_copy[dither].metricValues.mask[total_border_pixel] = not (new_value_to_assign) - temp_copy[dither].metricValues.data[total_border_pixel] = -500 - plot_dict = { - "xlabel": data_label, - "title": "%s: %s Round # %s" % (dither, data_label, r + 1), - "log_scale": False, - "labelsize": 9, - "color_min": -550, - "color_max": 550, - "cmap": cm.jet, - } - else: - temp_copy[dither].metricValues.mask[:] = True - temp_copy[dither].metricValues.mask[total_border_pixel] = False - temp_copy[dither].metricValues.data[total_border_pixel] = new_value_to_assign - plot_dict = { - "xlabel": data_label, - "title": "%s %s Round # %s" % (dither, data_label, r + 1), - "log_scale": False, - "labelsize": 9, - "maxl": 500, - "cmap": cm.jet, - } - temp_copy[dither].set_plot_dict(plot_dict) - temp_copy[dither].set_plot_funcs([plots.HealpixSkyMap(), plots.HealpixPowerSpectrum()]) - temp_copy[dither].plot(plot_handler=plot_handler) - plt.show() - # save the found pixels with the appropriate key - borders[dither] = total_border_pixel - - # ------------------------------------------------------------------------ - # change the original map/array now. 
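Condensed, the border-finding pass above amounts to one neighbor query per pixel. Below is a minimal sketch of a single round of the pixel_radius loop, for the common case find_value='unmasked' with relation='='; it is illustrative only, not the deleted function itself.

import healpy as hp
import numpy as np


def find_border_pixels(mask, nside):
    # One round: unmasked pixels with at least one masked neighbor
    border = []
    for pix in np.where(~mask)[0]:
        neighbors = hp.get_all_neighbours(nside, pix)
        neighbors = neighbors[neighbors != -1]  # -1 entries are nonexistent neighbors
        if np.any(mask[neighbors]):
            border.append(pix)
    return np.unique(border).astype(int)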
- for dither in my_bundles: - total_border_pixel = borders[dither] - - if print_final_info: - print("Survey strategy: %s" % dither) - print("Total pixels changed: %s\n" % len(total_border_pixel)) - - if new_value.__contains__("mask"): - my_bundles[dither].metricValues.mask[total_border_pixel] = new_value_to_assign - else: - my_bundles[dither].metricValues.data[total_border_pixel] = new_value_to_assign - - if plot_final_plots: - # skymap - plot_dict = { - "xlabel": data_label, - "title": "%s: %s MaskedMap; pixel_radius: %s " % (dither, data_label, pixel_radius), - "log_scale": False, - "labelsize": 8, - "color_min": sky_map_color_min, - "color_max": sky_map_color_max, - "cmap": cm.jet, - } - my_bundles[dither].set_plot_dict(plot_dict) - my_bundles[dither].set_plot_funcs([plots.HealpixSkyMap()]) - my_bundles[dither].plot(plot_handler=plot_handler) - # power spectrum - plot_dict = { - "xlabel": data_label, - "title": "%s: %s MaskedMap; pixel_radius: %s " % (dither, data_label, pixel_radius), - "log_scale": False, - "labelsize": 12, - "maxl": 500, - "cmap": cm.jet, - } - my_bundles[dither].set_plot_dict(plot_dict) - my_bundles[dither].set_plot_funcs([plots.HealpixPowerSpectrum()]) - my_bundles[dither].plot(plot_handler=plot_handler) - plt.show() - - if return_border_indices: - return [my_bundles, borders] - else: - return my_bundles diff --git a/rubin_sim/maf/maf_contrib/lss_obs_strategy/os_bias_analysis.py b/rubin_sim/maf/maf_contrib/lss_obs_strategy/os_bias_analysis.py deleted file mode 100644 index 527fedbe3..000000000 --- a/rubin_sim/maf/maf_contrib/lss_obs_strategy/os_bias_analysis.py +++ /dev/null @@ -1,1049 +0,0 @@ -############################################################################################################################### -# The goal here is to implement Eq. 9.4 from the LSST community WP, which defines our FoM, and create plots. -# -# Humna Awan: humna.awan@rutgers.edu -# -############################################################################################################################### -__all__ = ( - "get_fsky", - "get_theory_spectra", - "get_outdir_name", - "return_cls", - "calc_os_bias_err", - "get_fom", - "os_bias_overplots", - "os_bias_overplots_diff_dbs", -) - -import datetime -import os -from collections import OrderedDict - -import healpy as hp -import matplotlib.pyplot as plt -import numpy as np - -from rubin_sim.maf.maf_contrib.lss_obs_strategy.constants_for_pipeline import power_law_const_a - - -############################################################################################################################### -# calculate fsky for a bundle -def get_fsky(outdir, band="i", print_fsky=True): - """ - - Calculate the fraction of the sky observed in a survey. The data must have been saved as - .npz files in the given output directory. The method only looks at the mask of the data array. - - Filenames should be in the format: __.npz - - Parameters - ------------------- - outdir : str - name of the output directory where the data-to-look-at is. - band: str - band to consider. Default: 'i' - print_fsky: `bool` - set to True if want to print( out fsky. 
Default: True - - """ - filenames = [f for f in os.listdir(outdir) if any([f.endswith("npz")])] - fsky = {} - for filename in filenames: - # print('Reading in %s for fsky'%filename) - dither_strategy = filename.split("%s_" % band)[1].split(".npz")[0] - data = np.load("%s/%s" % (outdir, filename)) - # total number of pixels in the sky - tot_pix = float(len(data["mask"])) - in_survey_pix = float(len(np.where(data["mask"] == False)[0])) - fsky[dither_strategy] = in_survey_pix / tot_pix - if print_fsky: - print("%s fsky: %s\n" % (dither_strategy, fsky[dither_strategy])) - return fsky - - -############################################################################################################################### -def get_theory_spectra(mock_data_path, mag_cut=25.6, plot_spectra=True, nside=256): - """ - - Return the data for the five redshift bins, read from the files containing the - with BAO galaxy power spectra from Hu Zhan. - - Parameters - ------------------- - mock_data_path : `str` - path to the folder with the theory spectra - mag_cut: `float` - r-band magnitude cut as the identifer in the filename from Hu. - allowed options: 24.0, 25.6, 27.5. Default: 25.6 - plot_spectra : `bool` - set to True if want to plot out the skymaps. Default: True - nside : `int` - HEALpix resolution parameter. Default: 256 - - Returns - ------- - ell : `np.ndarray` - array containing the ells - w_bao_cls : `dict` - keys = zbin_tags; data = spectra (pixelized for specified nside) - surf_num_density : `float` - surface number density in 1/Sr - """ - # read in the galaxy power spectra with the BAO - filename = "%s/cls015-200z_r%s.bins" % (mock_data_path, mag_cut) - print("\nReading in %s for theory cls." % filename) - shot_noise_data = np.genfromtxt( - filename - ) # last column = surface number density for each bin in 1/(sq arcmin) - - filename = "%s/cls015-200z_r%s" % (mock_data_path, mag_cut) - print("Reading in %s for theory cls." % filename) - all_data = np.genfromtxt(filename) - - # set up to read the data - ell = [] - w_bao_cls = OrderedDict() - w_bao_cls["0.15, and each panel can have OS bias - uncertainity from many different galaxy catalogs. Panel legends will specify the redshift bin - and the magnitude cut. - - Parameters - ------------------- - * out_dir: str: output directory where the output plots will be saved; a folder named - 'os_bias_overplots' will be created in the directory, if its not there already. - * data_paths: list of strings: list of strings containing the paths where the artificialStructure data will be - found for the filters specified. - * lim_mags_i: list of floats: list of the i-band magnitude cuts to get the data for. - * legend_labels: list of strings: list of the 'tags' for each case; will be used in the legends. e.g. if - lim_mags_i=[24.0, 25.3], legend_labels could be ['i<24.0', 'i<25.3'] - or ['r<24.4', 'i<25.7'] if want to label stuff with r-band limits. - * fsky_dith_dict: dict: dictionary containing the fraction of sky covered; keys should be the dither strategies. - The function get_fsky outputs the right dictionary. - * fsky_best: float: best fsky for the survey to compare everything relative to. - * mock_data_path: str: path to the mock data to consider - * run_name: str: run name tag to identify the output of specified OpSim output. - * theory_lim_mag: float: magnitude cut as the identifer in the filename from Hu. - Allowed options: 24.0, 25.6, 27.5 - * specified_dith_only: list of string: list of the names (strings) of the dither strategies to consider, e.g. 
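For reference, the sky-fraction bookkeeping in get_fsky above reduces to counting unmasked pixels; a minimal sketch, with a hypothetical .npz filename standing in for the real output files:

import numpy as np

data = np.load("coaddM5Data_masked_i_NoDither.npz")  # hypothetical filename
mask = data["mask"].astype(bool)
fsky = np.sum(~mask) / float(mask.size)  # in-survey pixels over all-sky pixels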
- if want to plot only NoDither, specified_dith_only= ['NoDither']. If - nothing is specified, all the dither strategies will be considered - (based on the npy files available for the runs). Default: None - * run_name_filtag: str: run name file tag. Default: None - * filters: list of strings: list containing the bands (in strings) to be used to calculate the OS bias - and its error. should contain AT LEAST two bands. - e.g. if filters=['g', 'r'], OS bias (at every ell) will be calculated as the - mean across g and r cls, while the bias error (at every ell) will be calculated - as the std. dev. across g and r cls. - Default: ['u', 'g', 'r', 'i'] - * nside: int: HEALpix resolution parameter. Default: 256 - * pixel_radius: int: number of pixels to mask along the shallow border. Default: 14 - * yr_cutoff: int: year cut to restrict analysis to only a subset of the survey. - Must range from 1 to 9, or None for the full survey analysis (10 yrs). - Default: None - * zbin: str: options include '0.15 4: - leg = axis.legend( - labelspacing=0.001, - ) - else: - leg = axis.legend() - axis.ticklabel_format(axis="y", style="sci", scilimits=(-2, 2)) - for legobj in leg.legendHandles: - legobj.set_linewidth(2.0) - row += 1 - if row > nrows - 1: - row = 0 - col += 1 - # ---------------------------------------------------------------------------------------- - if suptitle is not None: - plt.suptitle(suptitle) - # turn stuff off if have odd number of panels - if (max_entries % 2 != 0) and (max_entries > 1): # i.e. have odd number of diths - ax[nrows - 1, ncols - 1].axis("off") - - width = 20 - fig.set_size_inches(width, int(nrows * 30 / 7.0)) - - # set up the filename - dith_tag = "" - if specified_dith_only is not None: - dith_tag = "_%sdiths" % max_entries - if run_name_filetag is None: - run_name_filetag = run_name - date_tag = datetime.date.isoformat(datetime.date.today()) - bias_type_tag = "".join(str(x) for x in filters) - ell_tag = "_%s, and each panel can have - OS bias uncertainity from many different cadences. Panel legends will specify the redshift bin - and OpSim output tag. - - Parameters - ------------------- - * out_dir: str: main directory where the output plots should be saved; a folder named - 'os_bias_overplots' will be created in the directory, if its not there already. - * data_path: str: path to the artificialStructure data. - * run_names: list of str: list for run name tags to identify the output of specified OpSim outputs. - * legend_labels: list of strings: list of the 'tags' for each case; will be used in the legends. e.g. if - run_names=['enigma1189', 'minion1016'], legend_labels could be - ['enigma_1189', 'minion_1016']. - * fsky_dict: dict: dictionary of the dictionaries containing the fraction of sky covered for each of the - cadences. The keys should match the identifier; fsky_dict[indentifiers[:]] should have - the dither strategies as the keys. - * fsky_best: float: best fsky for the survey to compare everything relative to. - * mock_data_path: str: path to the mock data to consider - * theory_lim_mag: float: magnitude cut as the identifer in the filename from Hu. - Allowed options: 24.0, 25.6, 27.5 - * lim_mag_i: float: i-band magnitude cut to get the data for. - * specified_dith_only: list of string: list of the names (strings) of the dither strategies to consider, e.g. - if want to plot only NoDither, specified_dith_only=['NoDither']. If - nothing is specified, all the dither strategies will be considered - (based on the npy files available for the runs). 
Default: None - * filters: list of strings: list containing the bands (in strings) to be used to calculate the OS bias - and its error. should contain AT LEAST two bands. - e.g. if filters=['g', 'r'], OS bias (at every ell) will be calculated as the - mean across g and r c_ells, while the bias error (at every ell) will be calculated - as the std. dev. across g and r c_ells. - Default: ['u', 'g', 'r', 'i'] - * nside: int: HEALpix resolution parameter. Default: 256 - * pixel_radius: int: number of pixels to mask along the shallow border. Default: 14 - * yr_cutoff: int: year cut to restrict analysis to only a subset of the survey. - Must range from 1 to 9, or None for the full survey analysis (10 yrs). - Default: None - * zbin: str: options include '0.15 4: - leg = axis.legend( - labelspacing=0.001, - ) - else: - leg = axis.legend() - axis.ticklabel_format(axis="y", style="sci", scilimits=(-2, 2)) - for legobj in leg.legendHandles: - legobj.set_linewidth(2.0) - col += 1 - if col > ncols - 1: - col = 0 - row += 1 - # title to the plot? - if suptitle is not None: - plt.suptitle(suptitle, y=1.05) - # turn off axes on unused panels - if (max_entries % 2 != 0) and (max_entries > 1): # i.e. have odd number of diths - ax[nrows - 1, ncols - 1].axis("off") - fig.set_size_inches(20, int(nrows * 30 / 7.0)) - - # set up the filename - dith_tag = "" - if specified_dith_only is not None: - dith_tag = "%sdith" % max_entries - date_tag = datetime.date.isoformat(datetime.date.today()) - bias_type_tag = "".join(str(x) for x in filters) - ell_tag = "_%s Date: Thu, 1 Feb 2024 02:03:29 -0800 Subject: [PATCH 17/26] ruff and docstrings ruff cleanup ruff and docstrings in metric_bundles Ruff and docstrings --- rubin_sim/maf/maf_contrib/__init__.py | 1 - rubin_sim/maf/maf_contrib/lss_metrics.py | 3 +- .../maf_contrib/lss_obs_strategy/__init__.py | 5 - .../lv_dwarfs/lv_dwarfs_metrics.py | 140 ++--- .../periodic_star_modulation_metric.py | 81 +-- .../maf/maf_contrib/star_count_mass_metric.py | 3 +- .../maf/maf_contrib/star_count_metric.py | 1 + .../maf/maf_contrib/star_counts/coords.py | 22 +- .../maf/maf_contrib/star_counts/starcount.py | 17 - .../star_counts/starcount_bymass.py | 17 +- .../maf_contrib/star_counts/stellardensity.py | 7 - rubin_sim/maf/maf_contrib/var_depth_metric.py | 48 +- rubin_sim/maf/maf_contrib/xrb_metrics.py | 62 ++- .../young_stellar_objects_metric.py | 48 +- rubin_sim/maf/metric_bundles/metric_bundle.py | 187 ++++--- .../maf/metric_bundles/metric_bundle_group.py | 159 +++--- .../maf/metric_bundles/mo_metric_bundle.py | 135 +++-- rubin_sim/maf/metrics/__init__.py | 2 - rubin_sim/maf/metrics/agn_time_lag_metric.py | 3 +- rubin_sim/maf/metrics/agnstructure.py | 38 +- rubin_sim/maf/metrics/area_summary_metrics.py | 31 +- rubin_sim/maf/metrics/base_metric.py | 26 +- rubin_sim/maf/metrics/brown_dwarf_metric.py | 37 +- rubin_sim/maf/metrics/cadence_metrics.py | 78 ++- rubin_sim/maf/metrics/calibration_metrics.py | 236 +++++---- rubin_sim/maf/metrics/coverage_metric.py | 20 +- rubin_sim/maf/metrics/crowding_metric.py | 165 +++--- rubin_sim/maf/metrics/cumulative_metric.py | 6 +- rubin_sim/maf/metrics/dcr_metric.py | 14 +- rubin_sim/maf/metrics/exgal_m5.py | 21 +- .../maf/metrics/galactic_plane_metrics.py | 86 +-- .../metrics/galplane_time_sampling_metrics.py | 63 ++- rubin_sim/maf/metrics/hourglass_metric.py | 21 +- rubin_sim/maf/metrics/long_gap_agn_metric.py | 45 -- rubin_sim/maf/metrics/mo_metrics.py | 491 ++++++++++-------- rubin_sim/maf/metrics/pair_metric.py | 26 +- 
.../maf/metrics/periodic_detect_metric.py | 52 +- rubin_sim/maf/metrics/phase_gap_metric.py | 31 +- .../maf/metrics/qso_number_counts_metric.py | 32 +- rubin_sim/maf/metrics/scaling_metrics.py | 23 +- rubin_sim/maf/metrics/season_metrics.py | 110 ++-- rubin_sim/maf/metrics/simple_metrics.py | 9 +- rubin_sim/maf/metrics/slew_metrics.py | 71 --- rubin_sim/maf/metrics/sn_cadence_metric.py | 9 +- rubin_sim/maf/metrics/sn_n_sn_metric.py | 96 ++-- rubin_sim/maf/plots/spatial_plotters.py | 33 +- rubin_sim/maf/stackers/coord_stackers.py | 18 +- rubin_sim/maf/utils/generate_fov_map.py | 1 + rubin_sim/maf/web/maf_run_results.py | 4 +- 49 files changed, 1558 insertions(+), 1276 deletions(-) delete mode 100644 rubin_sim/maf/metrics/long_gap_agn_metric.py delete mode 100644 rubin_sim/maf/metrics/slew_metrics.py diff --git a/rubin_sim/maf/maf_contrib/__init__.py b/rubin_sim/maf/maf_contrib/__init__.py index 2c0b18396..2c9235f63 100644 --- a/rubin_sim/maf/maf_contrib/__init__.py +++ b/rubin_sim/maf/maf_contrib/__init__.py @@ -19,7 +19,6 @@ from .tdes_pop_metric import * from .triplet_metric import * from .var_depth_metric import * -from .var_metrics import * from .xrb_metrics import * from .young_stellar_objects_metric import * from .calculate_lsst_field_visibility_astropy import * diff --git a/rubin_sim/maf/maf_contrib/lss_metrics.py b/rubin_sim/maf/maf_contrib/lss_metrics.py index 8504b7c3c..287b1e83b 100644 --- a/rubin_sim/maf/maf_contrib/lss_metrics.py +++ b/rubin_sim/maf/maf_contrib/lss_metrics.py @@ -8,7 +8,8 @@ class GalaxyCountsMetric(BaseMetric): - """Estimate the number of galaxies expected at a particular coadded depth. + """Estimate the number of galaxies expected at a particular (extragalactic) + coadded depth. """ def __init__(self, m5_col="fiveSigmaDepth", nside=128, metric_name="GalaxyCounts", **kwargs): diff --git a/rubin_sim/maf/maf_contrib/lss_obs_strategy/__init__.py b/rubin_sim/maf/maf_contrib/lss_obs_strategy/__init__.py index d8a5dcc0a..f47aa09ba 100644 --- a/rubin_sim/maf/maf_contrib/lss_obs_strategy/__init__.py +++ b/rubin_sim/maf/maf_contrib/lss_obs_strategy/__init__.py @@ -1,8 +1,3 @@ -from .alm_plots import * -from .artificial_structure_calculation import * -from .coadd_m5_analysis import * from .constants_for_pipeline import * from .galaxy_counts_metric_extended import * from .galaxy_counts_with_pixel_calibration import * -from .masking_algorithm_generalized import * -from .os_bias_analysis import * diff --git a/rubin_sim/maf/maf_contrib/lv_dwarfs/lv_dwarfs_metrics.py b/rubin_sim/maf/maf_contrib/lv_dwarfs/lv_dwarfs_metrics.py index e1db87462..eac8c63cf 100644 --- a/rubin_sim/maf/maf_contrib/lv_dwarfs/lv_dwarfs_metrics.py +++ b/rubin_sim/maf/maf_contrib/lv_dwarfs/lv_dwarfs_metrics.py @@ -8,7 +8,6 @@ import os import astropy.units as u -import healpy as hp import numpy as np from astropy.coordinates import SkyCoord from astropy.io import ascii, fits @@ -20,13 +19,14 @@ def generate_known_lv_dwarf_slicer(): - """Read the Karachentsev+ catalog of nearby galaxies, and put the info about them - into a UserPointSlicer object. + """Read the Karachentsev+ catalog of nearby galaxies, + and put the info about them into a UserPointSlicer object. """ filename = os.path.join(get_data_dir(), "maf/lvdwarfs", "lsst_galaxies_1p25to9Mpc_table.fits") lv_dat0 = fits.getdata(filename) - # Keep only galaxies at dec < 35 deg., and with stellar masses > 10^7 M_Sun (and <1e14). + # Keep only galaxies at dec < 35 deg., + # and with stellar masses > 10^7 M_Sun (and <1e14). 
lv_dat_cuts = (lv_dat0["dec"] < 35.0) & (lv_dat0["MStars"] > 1e7) & (lv_dat0["MStars"] < 1e14) lv_dat = lv_dat0[lv_dat_cuts] @@ -38,11 +38,13 @@ def generate_known_lv_dwarf_slicer(): return slicer -# make a simulated LF for old galaxy of given integrated B, distance modulus mu, in any of filters ugrizY +# make a simulated LF for old galaxy of given integrated B, +# distance modulus mu, in any of filters ugrizY def make__fake_old_galaxy_lf(int_b, mu, filtername): """ - Make a simulated luminosity function for an old (10 Gyr) dwarf galaxy of given - integrated B magnitude, at a given distance modulus, in any of the filters ugrizY. + Make a simulated luminosity function for an old (10 Gyr) dwarf + galaxy of given integrated B magnitude, at a given distance modulus, + in any of the filters ugrizy. Parameters ---------- @@ -56,7 +58,8 @@ def make__fake_old_galaxy_lf(int_b, mu, filtername): if filtername == "y": filtername == "Y" model_bmag = 6.856379 # integrated B mag of the model LF being read - # Read a simulated luminosity function of [M/H]=-1.5, 10 Gyr stellar population: + # Read a simulated luminosity function of [M/H]=-1.5, 10 Gyr stellar + # population: filename = os.path.join(get_data_dir(), "maf/lvdwarfs", "LF_-1.5_10Gyr.dat") LF = ascii.read(filename, header_start=12) mags = LF["magbinc"] @@ -73,8 +76,8 @@ def make__fake_old_galaxy_lf(int_b, mu, filtername): def make_dwarf_lf_dicts(): """ Create dicts containing g- and i-band LFs for simulated dwarfs between - -10 < M_B < +3, so they can simply be looked up rather than having to - recreate them each time. Dict is keyed on M_B value. + -10 < M_B < +3, so they can simply be looked up rather than having to + recreate them each time. Dict is keyed on M_B value. """ lf_dict_i = {} lf_dict_g = {} @@ -96,14 +99,15 @@ def _sum_luminosity(l_fmags, l_fcounts): """ Sum the luminosities from a given luminosity function. - Uses the first bin's magnitude as a reference, sums luminosities relative to - that reference star, then converts back to magnitude at the end. + Uses the first bin's magnitude as a reference, sums luminosities + relative to that reference star, + then converts back to magnitude at the end. Parameters ---------- - l_fmags : np.array, `float` + l_fmags : `np.array,` `float` Magnitude bin values from the simulated LF. - l_fcounts : np.array, `int` + l_fcounts : `np.array`, `int` Number of stars in each magnitude bin. """ magref = l_fmags[0] @@ -156,20 +160,21 @@ def _dwarf_sblimit(glim, ilim, nstars, lf_dict_g, lf_dict_i, distlim, rng): g_l_fmags0, g_l_fcounts0 = lf_dict_g[mbkey] i_l_fcounts = rng.poisson(i_l_fcounts0) g_l_fcounts = rng.poisson(g_l_fcounts0) - i_l_fmags = i_l_fmags0 + distmod_lim # Add the distance modulus to make it apparent mags - g_l_fmags = g_l_fmags0 + distmod_lim # Add the distance modulus to make it apparent mags - # print(i_l_fcounts0-i_l_fcounts) + # Add the distance modulus to make it apparent mags + i_l_fmags = i_l_fmags0 + distmod_lim + # Add the distance modulus to make it apparent mags + g_l_fmags = g_l_fmags0 + distmod_lim gsel = g_l_fmags <= glim isel = i_l_fmags <= ilim ng = np.sum(g_l_fcounts[gsel]) ni = np.sum(i_l_fcounts[isel]) - # print('fake_mb: ',fake_mb, ' ng: ',ng, ' ni: ', ni, ' nstars: ', nstars) fake_mb += 0.1 if fake_mb > -9.9 and (ng > 0) and (ni > 0): gmag_tot = _sum_luminosity(g_l_fmags[gsel], g_l_fcounts[gsel]) - distmod_lim imag_tot = _sum_luminosity(i_l_fmags[isel], i_l_fcounts[isel]) - distmod_lim - # S = m + 2.5logA, where in this case things are in sq. 
arcmin, so A = 1 arcmin^2 = 3600 arcsec^2 + # S = m + 2.5logA, where in this case things are in sq. arcmin, + # so A = 1 arcmin^2 = 3600 arcsec^2 sbtot_g = distmod_lim + gmag_tot + 2.5 * np.log10(3600.0) sbtot_i = distmod_lim + imag_tot + 2.5 * np.log10(3600.0) mg_lim = gmag_tot @@ -198,35 +203,39 @@ def _dwarf_sblimit(glim, ilim, nstars, lf_dict_g, lf_dict_i, distlim, rng): class LVDwarfsMetric(BaseMetric): """ - Estimate the detection limit in total dwarf luminosity for resolved dwarf galaxies - at a given distance. + Estimate the detection limit in total dwarf luminosity for + resolved dwarf galaxies at a given distance. - This metric class uses simulated luminosity functions of dwarf galaxies with - known (assumed) luminosities to estimate the detection limit (in total dwarf - luminosity, M_V) for resolved dwarf galaxies at a given distance. It can be - applied to either known galaxies with their discrete positions and distances, - or an entire survey simulation with a fixed distance limit. + This metric class uses simulated luminosity functions of dwarf galaxies + with known (assumed) luminosities to estimate the detection limit + (in total dwarf luminosity, M_V) for resolved dwarf galaxies at a + given distance. It can be applied to either known galaxies with their + discrete positions and distances, or an entire survey simulation with + a fixed distance limit. - In the default use (with the KnownLvDwarfsSlicer), it returns detection limits for - a catalog of known local volume dwarfs, from the Karachentsev+ catalog of nearby galaxies. + In the default use (with the KnownLvDwarfsSlicer), + it returns detection limits for a catalog of known local volume dwarfs, + from the Karachentsev+ catalog of nearby galaxies. Parameters ---------- - radius : `float`, default=2.45, - Radius of the field being considered (for discrete fields only). By default, - UserPointSlicer uses a 2.45-deg field radius. - distlim : `float`, - Distance threshold in Mpc for which to calculate the limiting dwarf detection - luminosity. Only needed for healpix slicers, but *required* if healpix is used. - cmd_frac : `float`, default=0.1, - Fraction of the total area of the color-magnitude diagram that is spanned - by the tracer selection criteria. (e.g., the size of a box in color and - magnitude to select RGB-star candidates) - stargal_contamination : `float`, default=0.4, - Fraction of objects in CMD selection region that are actually unresolved - galaxies that were mis-classified as stars. - nsigma : `float`, default=10.0, - Required detection significance to declare a simulated dwarf "detected." + radius : `float` + Radius of the field being considered (for discrete fields only). + By default, UserPointSlicer uses a 2.45-deg field radius. + distlim : `float` + Distance threshold in Mpc for which to calculate the limiting + dwarf detection luminosity. Only needed for healpix slicers, + but *required* if healpix is used. + cmd_frac : `float` + Fraction of the total area of the color-magnitude diagram + that is spanned by the tracer selection criteria. (e.g., + the size of a box in color and magnitude to select RGB-star candidates) + stargal_contamination : `float` + Fraction of objects in CMD selection region that are actually + unresolved galaxies that were mis-classified as stars. + nsigma : `float` + Required detection significance to declare a simulated + dwarf "detected." 
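The luminosity bookkeeping in _sum_luminosity above can be written compactly. This condensed equivalent is a sketch of the approach the docstring describes, not the function itself: work relative to the first bin's magnitude so the flux sum stays well-conditioned, then convert the summed flux back to a magnitude.

import numpy as np


def sum_luminosity(lf_mags, lf_counts):
    magref = lf_mags[0]  # reference star: first LF bin
    total_flux = np.sum(lf_counts * 10.0 ** (-0.4 * (lf_mags - magref)))
    return magref - 2.5 * np.log10(total_flux)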
""" def __init__( @@ -256,7 +265,8 @@ def __init__( self.distlim = None filename = os.path.join(get_data_dir(), "maf/lvdwarfs", "lsst_galaxies_1p25to9Mpc_table.fits") lv_dat0 = fits.getdata(filename) - # Keep only galaxies at dec < 35 deg., and with stellar masses > 10^7 M_Sun. + # Keep only galaxies at dec < 35 deg., + # and with stellar masses > 10^7 M_Sun. lv_dat_cuts = (lv_dat0["dec"] < 35.0) & (lv_dat0["MStars"] > 1e7) & (lv_dat0["MStars"] < 1e14) lv_dat = lv_dat0[lv_dat_cuts] sc_dat = SkyCoord( @@ -281,11 +291,13 @@ def __init__( self.galaxy_counts_metric.scale = 1 cols = [self.m5_col, self.filter_col] - # GalaxyCountsMetric needs the DustMap, and StarDensityMetric needs StellarDensityMap: + # GalaxyCountsMetric needs the DustMap, + # and StarDensityMetric needs StellarDensityMap: maps = ["DustMap", "StellarDensityMap"] super().__init__(col=cols, metric_name=metric_name, maps=maps, units="M_V limit", **kwargs) - # Set up a random number generator, so that metric results are repeatable + # Set up a random number generator, + # so that metric results are repeatable self.rng = np.random.default_rng(seed) def run(self, data_slice, slice_point=None): @@ -296,16 +308,19 @@ def run(self, data_slice, slice_point=None): if len(np.where(gband)[0]) == 0 or len(np.where(iband)[0]) == 0: return self.badval - # calculate the dust-extincted coadded 5-sigma limiting mags in the g and i bands: + # calculate the dust-extincted coadded 5-sigma limiting mags + # in the g and i bands: g5 = self.exgal_coaddm5.run(data_slice[gband], slice_point) i5 = self.exgal_coaddm5.run(data_slice[iband], slice_point) if g5 < 15 or i5 < 15: - # If the limiting magnitudes won't even match the stellar density maps, exit + # If the limiting magnitudes won't even match the + # stellar density maps, exit return self.badval # Find the number of stars per sq arcsecond at the i band limit - # (this is a bit of a hack to get the starDensityMetric to calculate the nstars at this mag exactly) + # (this is a bit of a hack to get the starDensityMetric to + # calculate the nstars at this mag exactly) star_i5 = min(27.9, i5) self.star_density_metric.magLimit = star_i5 @@ -315,21 +330,25 @@ def run(self, data_slice, slice_point=None): ngal_sqdeg = self.galaxy_counts_metric.run(data_slice, slice_point) # GalaxyCountsMetric is undefined in some places. These cases return # zero; catch these and set the galaxy counts in those regions to a - # very high value. (this may not be true after catching earlier no-visits issues) + # very high value. (this may not be true after catching earlier + # no-visits issues) if ngal_sqdeg < 10.0: ngal_sqdeg = 1e7 - # Convert from per sq deg and per sq arcsecond into #'s per sq arcminute + # Convert from per sq deg and per sq arcsecond + # into #'s per sq arcminute ngal_sqarcmin = ngal_sqdeg / 3600 nstar_sqarcmin = nstar_sqarcsec * 3600 if ngal_sqarcmin < 0 or nstar_sqarcmin < 0: print( f"Here be a problem - ngals_sqarcmin {ngal_sqarcmin} or nstar_sqarcmin {nstar_sqarcmin} " - f'are negative. depths: {g5}, {i5}. {slice_point["ra"], slice_point["dec"], slice_point["sid"]}' + f'are negative. depths: {g5}, {i5}. 
' + f'{slice_point["ra"], slice_point["dec"], slice_point["sid"]}' ) - # The number of stars required to reach nsigma is nsigma times the Poisson - # fluctuations of the background (stars+galaxies contamination): + # The number of stars required to reach nsigma is + # nsigma times the Poisson fluctuations of the background + # (stars+galaxies contamination): nstars_required = self.nsigma * np.sqrt( (ngal_sqarcmin * self.cmd_frac * self.stargal_contamination) + (nstar_sqarcmin * self.cmd_frac) ) @@ -338,13 +357,12 @@ def run(self, data_slice, slice_point=None): # Use the distlim if a healpix slicer is input distlim = self.distlim else: - # Use discrete distances for known galaxies if a UserPointSlicer: + # Use discrete distances for known galaxies if a + # UserPointSlicer: distlim = slice_point["distance"] * u.Mpc - # sc_slice = SkyCoord(ra=slice_point['ra']*u.rad, dec=slice_point['dec']*u.rad) - # seps = sc_slice.separation(self.sc_dat) - # distlim = self.sc_dat[seps.argmin()].distance - # Calculate the limiting luminosity and surface brightness based on g5 and i5: + # Calculate the limiting luminosity and surface brightness + # based on g5 and i5: mg_lim, mi_lim, sb_g_lim, sb_i_lim, flag_lim = _dwarf_sblimit( g5, i5, @@ -359,8 +377,8 @@ def run(self, data_slice, slice_point=None): mv = self.badval else: - # To go from HSC g and i bands to V, use the conversion from Appendix A - # of Komiyama+2018, ApJ, 853, 29: + # To go from HSC g and i bands to V, use the conversion + # from Appendix A of Komiyama+2018, ApJ, 853, 29: # V = g_hsc - 0.371*(gi_hsc)-0.068 mv = mg_lim - 0.371 * (mg_lim - mi_lim) - 0.068 # sbv = sb_g_lim - 0.371 * (sb_g_lim - sb_i_lim) - 0.068 diff --git a/rubin_sim/maf/maf_contrib/periodic_star_modulation_metric.py b/rubin_sim/maf/maf_contrib/periodic_star_modulation_metric.py index 34a88fa6c..b3c18d6ba 100644 --- a/rubin_sim/maf/maf_contrib/periodic_star_modulation_metric.py +++ b/rubin_sim/maf/maf_contrib/periodic_star_modulation_metric.py @@ -12,38 +12,46 @@ from .periodic_star_metric import PeriodicStar """ This metric is based on the PeriodicStar metric - It was modified in a way to reproduce attempts to identify period/ phase modulation (Blazhko effect) - in RR Lyrae stars. - We are not implementing a period/ phase modulation in the light curve, but rather use short baselines - (e.g.: 20 days) of observations to test how well we can recover the period, phase and amplitude. We - do this as such an attempt is also useful for other purposes, i.e. if we want to test whether we - can just recover period, phase and amplitude from short baselines at all, without necessarily having + It was modified in a way to reproduce attempts to identify + phase modulation (Blazhko effect) in RR Lyrae stars. + We are not implementing a period/ phase modulation in the light curve, + but rather use short baselines (e.g.: 20 days) of observations to test + how well we can recover the period, phase and amplitude. + We do this as such an attempt is also useful for other purposes, + i.e. if we want to test whether we can just recover period, phase + and amplitude from short baselines at all, without necessarily having in mind to look for period/ phase modulations. - Like in the PeriodicStar metric, the light curve of an RR Lyrae star, or a periodic star in general, - is approximated as a simple sin wave. Other solutions might make use of light curve templates to - generate light curves. 
- Two other modifications we introduced for the PeriodicStarModulationMetric are: - In contrast to the PeriodicStar metric, we allow for a random phase offset to mimic observation - starting at random phase. - Also, we vary the periods and amplitudes within +/- 10 % to allow for a more realistic - sample of variable stars. + Like in the PeriodicStar metric, the light curve of an RR Lyrae star, + or a periodic star in general, is approximated as a simple sin wave. + Other solutions might make use of light curve templates + to generate light curves. + Two other modifications we introduced are: + In contrast to the PeriodicStar metric, we allow for a random phase + offset to mimic observation starting at random phase. + Also, we vary the periods and amplitudes within +/- 10 % to allow + for a more realistic sample of variable stars. This metric is based on the cadence note: - N. Hernitschek, K. Stassun, LSST Cadence Note: Cadence impacts on reliable classification - of standard-candle variable stars (2021) https://docushare.lsst.org/docushare/dsweb/Get/Document-37673 + N. Hernitschek, K. Stassun, LSST Cadence Note: + "Cadence impacts on reliable classification of standard-candle + variable stars (2021)" + https://docushare.lsst.org/docushare/dsweb/Get/Document-37673 """ class PeriodicStarModulationMetric(BaseMetric): - """Evaluate how well a periodic source can be fit on a short baseline, using a Monte Carlo simulation. - - At each slice_point, run a Monte Carlo simulation to see how well a periodic source can be fit. - Assumes a simple sin-wave light-curve, and generates Gaussain noise based in the 5-sigma limiting depth - of each observation. - Light curves are evaluated piecewise to test how well we can recover the period, phase and amplitude - from shorter baselines. We allow for a random phase offset to mimic observation starting at random phase. - Also, we vary the periods and amplitudes within +/- 10 % to allow for a more realistic sample of - variable stars. + """Evaluate how well a periodic source can be fit on a short baseline, + using a Monte Carlo simulation. + + At each slice_point, run a Monte Carlo simulation to see how well a + periodic source can be fit. + Assumes a simple sin-wave light-curve, and generates Gaussain noise + based in the 5-sigma limiting depth of each observation. + Light curves are evaluated piecewise to test how well we can recover + the period, phase and amplitude from shorter baselines. + We allow for a random phase offset to mimic observation starting + at random phase. Also, we vary the periods and amplitudes + within +/- 10 % to allow for a more realistic sample of variable stars. 
Parameters ---------- @@ -56,11 +64,13 @@ class PeriodicStarModulationMetric(BaseMetric): random_phase : `bool`, opt a random phase is assigned (default False) time_interval : `float`, opt - days (default 50); the interval over which we want to evaluate the light curve + days (default 50); + the interval over which we want to evaluate the light curve n_monte : `int`, opt number of noise realizations to make in the Monte Carlo (default 1000) period_tol : `float`, opt - fractional tolerance on the period to demand for a star to be considered well-fit (default 0.05) + fractional tolerance on the period to demand + for a star to be considered well-fit (default 0.05) amp_tol : `float`, opt fractional tolerance on the amplitude to demand (default 0.10) means : `list` of `float`, opt @@ -120,8 +130,8 @@ def __init__( def run(self, data_slice, slice_point=None): # Bail if we don't have enough points - # (need to fit mean magnitudes in each of the available bands - self.means - # and for a period, amplitude, and phase) + # (need to fit mean magnitudes in each of the available bands + # - self.means and for a period, amplitude, and phase) if data_slice.size < self.means.size + 3: return self.badval @@ -154,8 +164,9 @@ def run(self, data_slice, slice_point=None): mags = self.means + slice_point["distMod"] else: mags = self.means - # slightly different periods and amplitudes (+/- 10 %) to mimic true stars - # random phase offsets to mimic observation starting at random phase + # slightly different periods and amplitudes (+/- 10 %) + # to mimic true stars. random phase offsets to mimic + # observation starting at random phase true_period = random.uniform(0.9, 1.1) * self.period true_amplitude = random.uniform(0.9, 1.1) * self.amplitude if np.isnan(self.phase): @@ -178,7 +189,8 @@ def run(self, data_slice, slice_point=None): fit_obj = PeriodicStar(t_subrun["filter"]) with warnings.catch_warnings(): warnings.simplefilter("ignore") - # If it fails to converge, save values that should fail later + # If it fails to converge, + # save values that should fail later try: parm_vals, pcov = curve_fit( fit_obj, @@ -187,11 +199,12 @@ def run(self, data_slice, slice_point=None): p0=true_params, sigma=dmag, ) - except: + except RuntimeError: parm_vals = true_params * 0 + np.inf fits[i, :] = parm_vals - # Throw out any magnitude fits if there are no observations in that filter + # Throw out any magnitude fits if there are no + # observations in that filter ufilters = np.unique(data_slice[self.filter_col]) if ufilters.size < 9: for key in list(self.filter2index.keys()): diff --git a/rubin_sim/maf/maf_contrib/star_count_mass_metric.py b/rubin_sim/maf/maf_contrib/star_count_mass_metric.py index 1073395b9..7c29011b0 100644 --- a/rubin_sim/maf/maf_contrib/star_count_mass_metric.py +++ b/rubin_sim/maf/maf_contrib/star_count_mass_metric.py @@ -27,6 +27,7 @@ # There are stellar luminosity function maps available within MAF # that may supersede these StarCount functions + class StarCountMassMetric(BaseMetric): """Find the number of stars in a given field in the mass range fainter than magnitude 16 and bright enough to have noise less than @@ -51,7 +52,7 @@ class StarCountMassMetric(BaseMetric): Bandpass to consider. 
""" - def __init__(self, m1=0.9, m2=1.0, band='i', **kwargs): + def __init__(self, m1=0.9, m2=1.0, band="i", **kwargs): self.m1 = m1 self.m2 = m2 self.band = band diff --git a/rubin_sim/maf/maf_contrib/star_count_metric.py b/rubin_sim/maf/maf_contrib/star_count_metric.py index d7f67cece..4aa158980 100644 --- a/rubin_sim/maf/maf_contrib/star_count_metric.py +++ b/rubin_sim/maf/maf_contrib/star_count_metric.py @@ -25,6 +25,7 @@ # There are stellar luminosity function maps available within MAF # that may supersede these StarCount functions + class StarCountMetric(BaseMetric): """Find the number of stars in a given field between d1 and d2 in parsecs. diff --git a/rubin_sim/maf/maf_contrib/star_counts/coords.py b/rubin_sim/maf/maf_contrib/star_counts/coords.py index 9fa114dd0..0a7c040c9 100644 --- a/rubin_sim/maf/maf_contrib/star_counts/coords.py +++ b/rubin_sim/maf/maf_contrib/star_counts/coords.py @@ -9,7 +9,6 @@ # that uses ephem package, for redundancy purposes. # For use with Field Star Count metric -import sys import numpy as np from scipy.optimize import fsolve @@ -24,23 +23,23 @@ def eq_gal(eq_ra, eq_dec): a = np.radians(eq_ra) def equations(p): - b, l, x = p + b, ll, x = p f1a = np.cos(d) * (np.cos(a - rad1)) f2a = np.sin(d) * np.sin(rad2) + np.cos(d) * np.sin(a - rad1) * np.cos(rad2) f3a = np.sin(d) * np.cos(rad2) - np.cos(d) * np.sin(a - rad1) * np.sin(rad2) - f1 = np.cos(b) * np.cos(l - rad3) - f1a - f2 = np.cos(b) * np.sin(l - rad3) - f2a + f1 = np.cos(b) * np.cos(ll - rad3) - f1a + f2 = np.cos(b) * np.sin(ll - rad3) - f2a f3 = np.sin(b) - f3a return (f1, f2, f3) - b, l, x = fsolve(equations, (0, 0, 0)) + b, ll, x = fsolve(equations, (0, 0, 0)) b_deg = np.degrees(b) % 360 # galactic latitude if b_deg >= 270: b_deg = b_deg - 360 if b_deg > 90: b_deg = 180 - b_deg - l = l + np.pi - l_deg = np.degrees(l) % 360 # galactic longitude + ll = ll + np.pi + l_deg = np.degrees(ll) % 360 # galactic longitude return b_deg, l_deg # http://scienceworld.wolfram.com/astronomy/GalacticCoordinates.html @@ -48,7 +47,6 @@ def equations(p): def eq_gal2(eq_ra, eq_dec): d = np.radians(eq_dec) p = np.radians(eq_ra) - AC = np.radians(90.0) - d AB = np.radians(62.8717) CAB = np.radians(192.8585) - p cos_bc = np.sin(d) * np.cos(AB) + np.cos(d) * np.sin(AB) * np.cos(CAB) @@ -63,9 +61,7 @@ def eq_gal2(eq_ra, eq_dec): cos_cbd = -1 CBD = np.arccos(cos_cbd) b_deg = 90.0 - np.degrees(BC) - ad = np.radians(90.0) cad = np.radians(282.8595) - p - coscd = np.cos(cad) * np.cos(d) coscbd = np.cos(cad) * np.cos(d) / np.sin(BC) if coscbd > 1: coscbd = 1 @@ -100,9 +96,3 @@ def gal_cyn(b_deg, l_deg, dist): rho = np.arctan(y / x) return R, rho, Z - -if __name__ == "__main__": - gal_lat, gal_lon = eq_gal2(float(sys.argv[1]), float(sys.argv[2])) - print(gal_lat, gal_lon) - R, rho, z = gal_cyn(gal_lat, gal_lon, float(sys.argv[3])) - print(R, rho, z) diff --git a/rubin_sim/maf/maf_contrib/star_counts/starcount.py b/rubin_sim/maf/maf_contrib/star_counts/starcount.py index 72a5a394c..c7e8bcdbc 100644 --- a/rubin_sim/maf/maf_contrib/star_counts/starcount.py +++ b/rubin_sim/maf/maf_contrib/star_counts/starcount.py @@ -5,15 +5,10 @@ # Last edited 8/15/2015 # Description: Calculates the number of stars in a given direction and # between a given set of distances. For use with Field Star Count metric -import math -import sys import numpy as np -from scipy.optimize import fsolve - from . 
import coords, stellardensity -# from rubin_sim.coordUtils import AstronomyBase skyarea = 41253.0 distancebins = 51 @@ -27,21 +22,9 @@ def star_vols(d1, d2, area): def starcount(eq_ra, eq_dec, d1, d2): volumes, distances = star_vols(d1, d2, 9.62) - # b_deg, l_deg=coords.eq_gal2(eq_ra, eq_dec) - # b_deg, l_deg=AstrometryBase.equatorialToGalactic(eq_ra, eq_dec) b_deg, l_deg = coords.eq_gal3(eq_ra, eq_dec) positions = [coords.gal_cyn(b_deg, l_deg, x) for x in distances] densities = [stellardensity.stellardensity(x[0], x[2]) for x in positions] totalcount = np.sum(np.asarray(volumes) * np.asarray(densities)) return totalcount - -if __name__ == "__main__": - print( - starcount( - float(sys.argv[1]), - float(sys.argv[2]), - float(sys.argv[3]), - float(sys.argv[4]), - ) - ) diff --git a/rubin_sim/maf/maf_contrib/star_counts/starcount_bymass.py b/rubin_sim/maf/maf_contrib/star_counts/starcount_bymass.py index 598d30cb9..422b1d1e8 100644 --- a/rubin_sim/maf/maf_contrib/star_counts/starcount_bymass.py +++ b/rubin_sim/maf/maf_contrib/star_counts/starcount_bymass.py @@ -8,12 +8,11 @@ # that will be fainter than mag 16, and have sufficiently low noise # in the given band. For use with Field Star Count metric -import sys import numpy as np -from scipy.optimize import fsolve, minimize, newton +from scipy.optimize import newton -from . import abs_mag, coords, spec_type, stellardensity +from . import abs_mag, spec_type from .starcount import starcount xi = 1.0 @@ -91,15 +90,3 @@ def starcount_bymass(eq_ra, eq_dec, m1, m2, band): distances = [dist_calc(x, band) for x in masses[:-1]] starcounts = [y * starcount(eq_ra, eq_dec, x[0], x[1]) for x, y in zip(distances, massfractions)] return sum(starcounts) - - -if __name__ == "__main__": - print( - starcount_bymass( - float(sys.argv[1]), - float(sys.argv[2]), - float(sys.argv[3]), - float(sys.argv[4]), - sys.argv[5], - ) - ) diff --git a/rubin_sim/maf/maf_contrib/star_counts/stellardensity.py b/rubin_sim/maf/maf_contrib/star_counts/stellardensity.py index d76bce89a..bf3cc0db8 100644 --- a/rubin_sim/maf/maf_contrib/star_counts/stellardensity.py +++ b/rubin_sim/maf/maf_contrib/star_counts/stellardensity.py @@ -5,11 +5,8 @@ # Last edited 8/15/2015 # Description: Calculates the stellar density based off of # Juric et al 2008 and Jackson et al 2002. For use with Field Star Count metric -import math -import sys import numpy as np -from scipy.optimize import fsolve zsun = 25.0 rsun = 8000.0 @@ -58,9 +55,5 @@ def stellardensity(R, Z, rho=0): return tot_density -if __name__ == "__main__": - print(stellardensity(float(sys.argv[1]), float(sys.argv[2]))) - - # Juric et al 2008 # Jackson et al 2002 diff --git a/rubin_sim/maf/maf_contrib/var_depth_metric.py b/rubin_sim/maf/maf_contrib/var_depth_metric.py index 68e6dce4a..15d965b37 100644 --- a/rubin_sim/maf/maf_contrib/var_depth_metric.py +++ b/rubin_sim/maf/maf_contrib/var_depth_metric.py @@ -11,7 +11,23 @@ class VarDepth(BaseMetric): - """Calculate the survey depth that a variable star can be reliably identified.""" + """Calculate the survey depth that a variable star can be + reliably identified. + + Parameters + ---------- + completeness : `float`, opt + Fractional desired completeness of recovered variable sample. + contamination : `float`, opt + Fractional allowed incompleteness of recovered nonvariables. + numruns : `int`, opt + Number of simulated realizations of noise. + Most computationally expensive part of metric. + signal : `float`, opt + Sqrt total pulsational power meant to be recovered. 
+ magres : `float`, opt + desired resolution of variability depth result. + """ def __init__( self, @@ -24,15 +40,6 @@ def __init__( magres=0.01, **kwargs, ): - """ - Instantiate metric. - - :m5col: the column name of the individual visit m5 data. - :completeness: fractional desired completeness of recovered variable sample. - :contamination: fractional allowed incompleteness of recovered nonvariables. - :numruns: number of simulated realizations of noise (most computationally espensive part). - :signal: sqrt total pulsational power meant to be recovered. - :magres: desired resolution of variability depth result.""" self.m5col = m5_col self.completeness = completeness self.contamination = contamination @@ -70,19 +77,20 @@ def run(self, data_slice, slice_point=None): # Since we are treating the underlying signal being representable by a # fixed-width gaussian, its variance pdf is a Chi-squared distribution - # with the degrees of freedom = visits. Since variances add, the variance + # with the degrees of freedom=visits. Since variances add, the variance # pdfs convolve. The cumulative distribution function of the sum of two # random deviates is the convolution of one pdf with a cdf. - # We'll consider the cdf of the noise-only variances because it's easier - # to interpolate + # We'll consider the cdf of the noise-only variances + # because it's easier to interpolate noisesorted = np.sort(noiseonlyvar) # linear interpolation interpnoisecdf = UnivariateSpline( noisesorted, np.arange(self.numruns) / float(self.numruns), k=1, s=0 ) - # We need a binned, signal-only variance probability distribution function for numerical convolution + # We need a binned, signal-only variance probability + # distribution function for numerical convolution numsignalsamples = 100 xsig = np.linspace(chi2.ppf(0.001, N), chi2.ppf(0.999, N), numsignalsamples) signalpdf = chi2.pdf(xsig, N) @@ -90,7 +98,8 @@ def run(self, data_slice, slice_point=None): xsig = (self.signal**2.0) * xsig / N pdfstepsize = xsig[1] - xsig[0] # Since everything is going to use this stepsize down the line, - # normalize so the pdf integrates to 1 when summed (no factor of stepsize needed) + # normalize so the pdf integrates to 1 when summed + # (no factor of stepsize needed) signalpdf /= np.sum(signalpdf) # run through the sample magnitudes, calculate distance between cont @@ -112,11 +121,14 @@ def run(self, data_slice, slice_point=None): # Only do calculation if near the solution: if (len(xnoise) > numsignalsamples / 10) and (not solutionfound): noisecdf = interpnoisecdf(xnoise / scalefact) - noisepdf = noisecdf[1:] - noisecdf[:-1] # turn into a noise pdf + # turn into a noise pdf + noisepdf = noisecdf[1:] - noisecdf[:-1] noisepdf /= np.sum(noisepdf) - xnoise = (xnoise[1:] + xnoise[:-1]) / 2.0 # from cdf to pdf conversion + # from cdf to pdf conversion + xnoise = (xnoise[1:] + xnoise[:-1]) / 2.0 - # calculate and plot the convolution = signal+noise variance dist. + # calculate and plot the convolution = + # signal+noise variance dist. convolution = 0 if len(noisepdf) > len(signalpdf): convolution = np.convolve(noisepdf, signalpdf) diff --git a/rubin_sim/maf/maf_contrib/xrb_metrics.py b/rubin_sim/maf/maf_contrib/xrb_metrics.py index 8a4ec6c9a..3db9796e7 100644 --- a/rubin_sim/maf/maf_contrib/xrb_metrics.py +++ b/rubin_sim/maf/maf_contrib/xrb_metrics.py @@ -24,8 +24,10 @@ def __init__(self, seed=42): def lmxb_abs_mags(self, size=1): """Return LMXB absolute magnitudes per LSST filter. 
- Absolute magnitude relation is taken from Casares 2018 (2018MNRAS.473.5195C) - Colors are taken from M. Johnson+ 2019 (2019MNRAS.484...19J) + Absolute magnitude relation is taken from Casares 2018 + (2018MNRAS.473.5195C) + Colors are taken from M. Johnson+ 2019 + (2019MNRAS.484...19J) Parameters ---------- @@ -42,7 +44,7 @@ def lmxb_abs_mags(self, size=1): # Derive random orbital periods from the sample in Casares 18 Table 4 # Since there are significant outliers from a single Gaussian sample, - # take random choices with replacement and then perturb them fractionally + # take random choices with replacement, then perturb them fractionally catalog__porb = np.array( [ 33.85, @@ -153,7 +155,8 @@ def fred(self, t, amplitude, tau_rise, tau_decay): return amplitude * np.exp(2 * np.sqrt(tau_rise / tau_decay)) * np.exp(-tau_rise / t - t / tau_decay) def lightcurve(self, t, filtername, params): - """Generate an XRB outburst lightcurve for given times and a single filter. + """Generate an XRB outburst lightcurve for given times + and a single filter. Uses a simple fast-rise, exponential decay with parameters taken from Chen, Shrader, & Livio 1997 (ApJ 491, 312). @@ -173,7 +176,8 @@ def lightcurve(self, t, filtername, params): Returns ------- lc : `array` - Magnitudes of the outburst at the specified times in the given filter + Magnitudes of the outburst at the specified times in + the given filter """ # fill lightcurve with nondetections @@ -194,7 +198,8 @@ def lightcurve(self, t, filtername, params): return lc def detectable_duration(self, params, ebv, distance): - """Determine time range an outburst is detectable with perfect sampling. + """Determine time range an outburst is detectable with + perfect sampling. Does not consider visibility constraints. @@ -210,9 +215,11 @@ def detectable_duration(self, params, ebv, distance): Returns ---------- visible_start_time : `float` - first time relative to outburst start that the outburst could be detected + first time relative to outburst start that the outburst + could be detected visible_end_time : `float` - last time relative to outburst start that the outburst could be detected + last time relative to outburst start that the outburst + could be detected """ nmodelt = 10000 @@ -256,6 +263,24 @@ def detectable_duration(self, params, ebv, distance): class XRBPopMetric(BaseMetric): + """Evaluate whether a given XRB would be detectable. + + Includes a variety of detection criteria options, including if the + XRB is possible to detect, if it is detected at least pts_needed times, + or if it is detected pts_early times within t_early days of the start of + the outburst. + + Parameters + ---------- + pts_needed : `int`, opt + Minimum number of detections, for simple `detected` option. + mjd0 : `float`, opt + Start of survey. + output_lc : `bool`, opt + If True, output lightcurve points. + If False, just return metric values. + """ + def __init__( self, metric_name="XRBPopMetric", @@ -264,6 +289,8 @@ def __init__( filter_col="filter", night_col="night", pts_needed=2, + pts_early=2, + t_early=2, mjd0=None, output_lc=False, badval=-666, @@ -275,6 +302,8 @@ def __init__( self.filter_col = filter_col self.night_col = night_col self.pts_needed = pts_needed + self.pts_early = pts_early + self.t_early = t_early # `bool` variable, if True the light curve will be exported self.output_lc = output_lc @@ -296,7 +325,9 @@ def __init__( self.comment = "Number or characterization of XRBs." 
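A useful property of the FRED profile defined in fred above, handy when sanity-checking outburst lightcurves: the exponent -tau_rise/t - t/tau_decay peaks at t = sqrt(tau_rise * tau_decay), where it exactly cancels the exp(2*sqrt(tau_rise/tau_decay)) prefactor, so the maximum flux equals amplitude. A short numerical check (parameter values here are arbitrary):

import numpy as np


def fred(t, amplitude, tau_rise, tau_decay):
    return amplitude * np.exp(2 * np.sqrt(tau_rise / tau_decay)) * np.exp(-tau_rise / t - t / tau_decay)


tau_rise, tau_decay = 1.0, 30.0
t_peak = np.sqrt(tau_rise * tau_decay)  # analytic maximum of the exponent
t = np.linspace(0.01, 300.0, 100_000)
flux = fred(t, 1.0, tau_rise, tau_decay)
assert np.isclose(flux.max(), 1.0, atol=1e-4)
assert np.isclose(t[np.argmax(flux)], t_peak, atol=0.01)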
def _ever_detect(self, where_detected): - """Simple detection criteria: detect at least a certain number of times""" + """Simple detection criteria: detect at least a certain number + of times. + """ # Detected data points return np.size(where_detected) >= self.pts_needed @@ -324,12 +355,14 @@ def _early_detect(self, where_detected, time, early_window_days=7.0, n_early_det return np.sum(time[where_detected] <= early_window_days) >= n_early_detections def _mean_time_between_detections(self, t): - """Calculate the mean time between detections over the visible interval. + """Calculate the mean time between detections over the + visible interval. Parameters ---------- t : `array` - Times of detections, bracketed by the start and end visibility times + Times of detections, bracketed by the start and + end visibility times Return ---------- @@ -340,7 +373,8 @@ def _mean_time_between_detections(self, t): return np.mean(np.sort(np.diff(t))) def _possible_to_detect(self, visible_duration): - """Return True if the outburst is ever bright enough for LSST to detect + """Return True if the outburst is ever bright enough + for LSST to detect. Parameters ---------- @@ -379,7 +413,7 @@ def run(self, data_slice, slice_point=None): result["possible_to_detect"] = self._possible_to_detect(slice_point["visible_duration"]) result["ever_detect"] = self._ever_detect(where_detected) - result["early_detect"] = self._early_detect(where_detected, t) + result["early_detect"] = self._early_detect(where_detected, t, self.t_early, self.pts_early) result["number_of_detections"] = self._number_of_detections(where_detected) if result["number_of_detections"] > 1: @@ -430,7 +464,7 @@ def reduce_mean_time_between_detections(self, metric): def generate_xrb_pop_slicer(t_start=1, t_end=3652, n_events=10000, seed=42): """Generate a population of XRB events, and put the info about them - into a UserPointSlicer object + into a UserPointSlicer object. Parameters ---------- diff --git a/rubin_sim/maf/maf_contrib/young_stellar_objects_metric.py b/rubin_sim/maf/maf_contrib/young_stellar_objects_metric.py index bf05b1369..de6ad6bb9 100644 --- a/rubin_sim/maf/maf_contrib/young_stellar_objects_metric.py +++ b/rubin_sim/maf/maf_contrib/young_stellar_objects_metric.py @@ -51,31 +51,38 @@ def __call__(self, r): class NYoungStarsMetric(BaseMetric): - """Calculate the distance or number of stars with color uncertainty defined by mags/snrs. + """Calculate the distance or number of stars with + color uncertainty defined by mags/snrs. Parameters ---------- - metric_name : str, opt + metric_name : `str`, opt Default 'young_stars'. - m5_col : str, opt - The default column name for m5 information in the input data. Default fiveSigmaDepth. - filter_col : str, opt + m5_col : `str`, opt + The default column name for m5 information in the input data. + Default fiveSigmaDepth. + filter_col : `str`, opt The column name for the filter information. Default filter. - mags : dict - The absolute magnitude of the object in question. Keys of filter name, values in mags. + mags : `dict`, opt + The absolute magnitude of the object in question. + Keys of filter name, values in mags. Default is for a 0.3 solar mass star at age = 100 Myr. - snrs : dict + snrs : `dict`, opt The SNR to demand for each filter. - galb_limit : float, opt - The galactic latitude above which to return zero (degrees). Default 90. - badval : float, opt - The value to return when the metric value cannot be calculated. Default 0. 
- return_distance : bool, opt - Whether the metric will return the maximum distance that can be reached for each slice_point, + galb_limit : `float`, opt + The galactic latitude above which to return zero (degrees). + Default 90. + badval : `float`, opt + The value to return when the metric value cannot be calculated. + Default 0. + return_distance : `bool`, opt + Whether the metric will return the maximum distance that + can be reached for each slice_point, or the total number of stars down to mags/snrs. - crowding_error: float, opt + crowding_error : `float`, opt Crowding error that gets passed to CrowdingM5Metric. Default 0.25. - use_2D_extinction: Uses the 2D extinction map instead of the 3D one. Default False. + use_2D_extinction : `bool`, opt + Uses the 2D extinction map instead of the 3D one. Default False. """ def __init__( @@ -109,7 +116,7 @@ def __init__( self.return_distance = return_distance units = "kpc" if self.return_distance else "N stars" super().__init__(cols, metric_name=metric_name, maps=maps, units=units, badval=badval, **kwargs) - # Save R_x values for on-the-fly calculation of dust extinction with map + # Save R_x values for on-the-fly calculation of dust extinction self.r_x = DustValues().r_x.copy() # set return type self.m5_col = m5_col @@ -141,8 +148,8 @@ def run(self, data_slice, slice_point=None): sky_area = np.pi * (np.radians(1.75)) ** 2 # if we are outside the galb_limit, return nothing - # Note we could make this a more complicated function that returns an expected density of - # star forming regions + # Note we could make this a more complicated function that + # returns an expected density of star forming regions if np.abs(slice_point["galb"]) > self.galb_limit: return self.badval @@ -179,7 +186,8 @@ def run(self, data_slice, slice_point=None): filtername=filtername, ) distances.append(dist) - # compute the final distance, limited by whichever filter is most shallow + # compute the final distance, limited by whichever filter is + # most shallow final_distance = np.min(distances, axis=-1) / 1e3 # to kpc if self.return_distance: return final_distance diff --git a/rubin_sim/maf/metric_bundles/metric_bundle.py b/rubin_sim/maf/metric_bundles/metric_bundle.py index 75616eff8..fdf588d06 100644 --- a/rubin_sim/maf/metric_bundles/metric_bundle.py +++ b/rubin_sim/maf/metric_bundles/metric_bundle.py @@ -21,25 +21,33 @@ def create_empty_metric_bundle(): Returns ------- - MetricBundle - An empty metric bundle, configured with just the :class:`BaseMetric` and :class:`BaseSlicer`. + MetricBundle : `MetricBundle` + An empty metric bundle, + configured with just the :class:`BaseMetric` and :class:`BaseSlicer`. """ return MetricBundle(metrics.BaseMetric(), slicers.BaseSlicer(), "") class MetricBundle: - """The MetricBundle is defined by a combination of a (single) metric, slicer and - constraint - together these define a unique combination of an opsim benchmark. - An example would be: a CountMetric, a HealpixSlicer, and a constraint 'filter="r"'. + """Define the "thing" you are measuring, with a combination of + * metric (calculated per data_slice) + * slicer (how to create the data_slices) + * constraint (an optional definition of a large subset of data) - After the metric is evaluated over the slice_points of the slicer, the resulting - metric values are saved in the MetricBundle. + Together these define a unique combination of an opsim benchmark. + An example would be: + a CountMetric, a HealpixSlicer, and a constraint of 'filter="r"'. 
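For readers following along, the docstring's example combination can be written out directly. A minimal sketch, using the module paths this patch itself touches (the column and nside choices are illustrative):

```python
from rubin_sim.maf.metrics import CountMetric
from rubin_sim.maf.slicers import HealpixSlicer
from rubin_sim.maf.metric_bundles import MetricBundle

# The docstring's example: a CountMetric, a HealpixSlicer,
# and a constraint of 'filter="r"'.
bundle = MetricBundle(
    CountMetric(col="observationStartMJD"),
    HealpixSlicer(nside=64),
    'filter="r"',
)
```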
- The MetricBundle also saves the summary metrics to be used to generate summary - statistics over those metric values, as well as the resulting summary statistic values. + After the metric is evaluated at each slice_point created by the + slicer, the resulting metric values are saved in the MetricBundle. - Plotting parameters and display parameters (for showMaf) are saved in the MetricBundle, - as well as additional info_label such as the opsim run name, and relevant stackers and maps + The MetricBundle also saves the summary metrics to be used + to generate summary statistics over those metric values, + as well as the resulting summary statistic values. + + Plotting parameters and display parameters (for show_maf) are saved + in the MetricBundle, as well as additional info_label such as the + opsim run name, and relevant stackers and maps to apply when calculating the metric values. Parameters @@ -49,33 +57,46 @@ class MetricBundle: slicer : `~rubin_sim.maf.slicer` The Slicer to apply to the incoming visit data (the observations). constraint : `str` or None, opt - A (sql-style) constraint to apply to the visit data, to apply a broad sub-selection. - stacker_list : `list` of `~rubin_sim.maf.stacker`, opt - A list of pre-configured stackers to use to generate additional columns per visit. - These will be generated automatically if needed, but pre-configured versions will override these. + A (sql-style) constraint to apply to the visit data, to apply a + broad sub-selection. + stacker_list : `list` [`~rubin_sim.maf.stacker`], opt + A list of pre-configured stackers to use to generate additional + columns per visit. + These will be generated automatically if needed, but pre-configured + versions will override these. run_name : `str`, opt - The name of the simulation being run. This will be added to output files and plots. - Setting it prevents file conflicts when running the same metric on multiple simulations, and + The name of the simulation being run. + This will be added to output files and plots. + Setting it prevents file conflicts when running the same + metric on multiple simulations, and provides a way to identify which simulation is being analyzed. metadata : `str`, opt A deprecated version of info_label (below). - Values set by metadata will be used for info_label. If both are set, info_label is used. + Values set by metadata will be used for info_label. + If both are set, info_label is used. info_label : `str` or None, opt Information to add to the output metric data file name and plot labels. - If this is not provided, it will be auto-generated from the constraint (if any). - Setting this provides an easy way to specify different configurations of a metric, a slicer, + If this is not provided, it will be auto-generated from the + constraint (if any). + Setting this provides an easy way to specify different + configurations of a metric, a slicer, or just to rewrite your constraint into friendlier terms. - (i.e. a constraint like 'note not like "%DD%"' can become "non-DD" in the file name and plot labels + (i.e. a constraint like 'note not like "%DD%"' can become + "non-DD" in the file name and plot labels by specifying info_label). plot_dict : `dict` of plotting parameters, opt Specify general plotting parameters, such as x/y/color limits. display_dict : `dict` of display parameters, opt - Specify parameters for showMaf web pages, such as the side bar labels and figure captions. 
- Keys: 'group', 'subgroup', 'caption', and 'order' (such as to set metrics in filter order, etc) + Specify parameters for show_maf web pages, such as the + side bar labels and figure captions. + Keys: 'group', 'subgroup', 'caption', and 'order' + (such as to set metrics in filter order, etc) summary_metrics : `list` of `~rubin_sim.maf.metrics` - A list of summary metrics to run to summarize the primary metric, such as MedianMetric, etc. + A list of summary metrics to run to summarize the + primary metric, such as MedianMetric, etc. maps_list : `list` of `~rubin_sim.maf.maps` - A list of pre-configured maps to use for the metric. This will be auto-generated if specified + A list of pre-configured maps to use for the metric. + This will be auto-generated if specified by the metric class, but pre-configured versions will override these. """ @@ -144,7 +165,7 @@ def __init__( map_names = [map_name.__class__.__name__ for map_name in self.maps_list] if hasattr(self.metric, "maps"): for map_needed in self.metric.maps: - if type(map_needed) == str: + if isinstance(map_needed, str): if map_needed not in map_names: temp_map = getattr(maps, map_needed)() self.maps_list.append(temp_map) @@ -215,7 +236,8 @@ def _setup_metric_values(self): def _build_metadata(self, info_label, metadata=None): """If no info_label is provided, process the constraint - (by removing extra spaces, quotes, the word 'filter' and equal signs) to make a info_label version. + (by removing extra spaces, quotes, the word 'filter' and equal signs) + to make an info_label version. e.g. 'filter = "r"' becomes 'r' """ # Pass the deprecated version into info_label if info_label is not set @@ -234,7 +256,8 @@ def _build_metadata(self, info_label, metadata=None): def _build_file_root(self): """ - Build an auto-generated output filename root (i.e. minus the plot type or .npz ending). + Build an auto-generated output filename root + (i.e. minus the plot type or .npz ending). """ # Build basic version. self.file_root = "_".join( @@ -250,10 +273,10 @@ def _build_file_root(self): def _find_req_cols(self): """Find the columns needed by the metrics, slicers, and stackers. - If there are any additional stackers required, instatiate them and add them to - the self.stackers list. - (default stackers have to be instantiated to determine what additional columns - are needed from database). + If there are any additional stackers required, instantiate them + and add them to the self.stackers list. + (default stackers have to be instantiated to determine + what additional columns are needed from database). """ # Find all the columns needed by metric and slicer. known_cols = self.slicer.columns_needed + list(self.metric.col_name_arr) @@ -268,7 +291,8 @@ def _find_req_cols(self): if self.col_info.get_data_source(col) == self.col_info.default_data_source: self.db_cols.add(col) else: - # New default stackers could come from metric/slicer or stackers. + # New default stackers could come from metric/slicer + # or stackers. new_stackers.add(self.col_info.get_data_source(col)) # Remove already-specified stackers from default list. for s in self.stacker_list: @@ -304,7 +328,7 @@ def set_summary_metrics(self, summary_metrics): Parameters ---------- - summary_metrics : `List` of [`BaseMetric`] + summary_metrics : `List` [`BaseMetric`] Instantiated summary metrics to use to calculate summary statistics for this metric.
""" @@ -448,9 +472,9 @@ def set_run_name(self, run_name, update_file_root=True): Parameters ---------- - run_name: `str` + run_name : `str` Run Name, which will become part of the fileRoot. - fileRoot: `bool`, optional + fileRoot : `bool`, optional Flag to update the fileRoot with the run_name. """ self.run_name = run_name @@ -508,7 +532,8 @@ def write(self, comment="", out_dir=".", outfile_suffix=None, results_db=None): self.write_db(results_db=results_db) def output_json(self): - """Set up and call the baseSlicer outputJSON method, to output to IO string. + """Set up and call the baseSlicer outputJSON method, + to output to IO string. Returns ------- @@ -581,7 +606,7 @@ def load(cls, filename): Parameters ---------- - filename : str + filename : `str` The file from which to read the metric bundle data. """ metric_bundle = cls(metrics.BaseMetric(), slicers.BaseSlicer(), "") @@ -589,28 +614,32 @@ def load(cls, filename): return metric_bundle def compute_summary_stats(self, results_db=None): - """Compute summary statistics on metric_values, using summaryMetrics (metricbundle list). + """Compute summary statistics on metric_values, + using summaryMetrics (metricbundle list). Parameters ---------- results_db : Optional[ResultsDb] - ResultsDb object to use to store the summary statistic values on disk. + ResultsDb object to use to store the + summary statistic values on disk. """ if self.summary_values is None: self.summary_values = {} if self.summary_metrics is not None: - # Build array of metric values, to use for (most) summary statistics. + # Build array of metric values, to use for summary statistics. rarr_std = np.array( list(zip(self.metric_values.compressed())), dtype=[("metricdata", self.metric_values.dtype)], ) for m in self.summary_metrics: - # The summary metric colname should already be set to 'metricdata', but in case it's not: + # The summary metric colname should already be set + # to 'metricdata', but in case it's not: m.colname = "metricdata" summary_name = m.name.replace(" metricdata", "").replace(" None", "") if hasattr(m, "mask_val"): - # summary metric requests to use the mask value, as specified by itself, - # rather than skipping masked vals. + # summary metric requests to use the mask value, + # as specified by itself, + # rather than skipping masked vals. rarr = np.array( list(zip(self.metric_values.filled(m.mask_val))), dtype=[("metricdata", self.metric_values.dtype)], @@ -644,25 +673,28 @@ def reduce_metric( reduce_display_dict=None, ): """Run 'reduceFunc' (any function that operates on self.metric_values). - Typically reduceFunc will be the metric reduce functions, as they are tailored to expect the - metric_values format. - reduceDisplayDict and reducePlotDicts are displayDicts and plotDicts to be - applied to the new metricBundle. + + Typically reduceFunc will be the metric reduce functions, + as they are tailored to expect the metric_values format. + reduceDisplayDict and reducePlotDicts are displayDicts + and plotDicts to be applied to the new metricBundle. Parameters ---------- - reduce_func : Func - Any function that will operate on self.metric_values (typically metric.reduce* function). - reduce_plot_dict : Optional[dict] + reduce_func : `Func` + Any function that will operate on self.metric_values + (typically metric.reduce* function). + reduce_plot_dict : `dict`, opt Plot dictionary for the results of the reduce function. - reduce_display_dict : Optional[dict] + reduce_display_dict : `dict`, opt Display dictionary for the results of the reduce function. 
Returns ------- - MetricBundle - New metric bundle, inheriting info_label from this metric bundle, but containing the new - metric values calculated with the 'reduceFunc'. + newmetric_bundle : `MetricBundle` + New metric bundle, inheriting info_label from this metric bundle, + but containing the new metric values calculated with + the 'reduceFunc'. """ # Generate a name for the metric values processed by the reduceFunc. if reduce_func_name is not None: @@ -670,7 +702,8 @@ def reduce_metric( else: r_name = reduce_func.__name__.replace("reduce_", "") reduce_name = self.metric.name + "_" + r_name - # Set up metricBundle to store new metric values, and add plot_dict/display_dict. + # Set up metricBundle to store new metric values, + # and add plot_dict/display_dict. newmetric = deepcopy(self.metric) newmetric.name = reduce_name newmetric.metric_dtype = "float" @@ -693,22 +726,27 @@ def reduce_metric( ) # Build a new output file root name. newmetric_bundle._build_file_root() - # Add existing plot_dict (except for title/xlabels etc) into new plot_dict. + # Add existing plot_dict (except for title/xlabels etc) + # into new plot_dict. for k, v in self.plot_dict.items(): if k not in newmetric_bundle.plot_dict: newmetric_bundle.plot_dict[k] = v - # Update newmetric_bundle's plot dictionary with any set explicitly by reducePlotDict. + # Update newmetric_bundle's plot dictionary with + # any set explicitly by reducePlotDict. newmetric_bundle.set_plot_dict(reduce_plot_dict) # Copy the parent metric's display dict into the reduce display dict. newmetric_bundle.set_display_dict(self.display_dict) - # Set the reduce function display 'order' (this is set in the BaseMetric - # by default, but can be overriden in a metric). + # Set the reduce function display 'order' + # (this is set in the BaseMetric by default, + # but can be overridden in a metric). order = newmetric.reduce_order[r_name] newmetric_bundle.display_dict["order"] = order - # And then update the newmetric_bundle's display dictionary with any set + # And then update the newmetric_bundle's + # display dictionary with any set # explicitly by reduceDisplayDict. newmetric_bundle.set_display_dict(reduce_display_dict) - # Set up new metricBundle's metric_values masked arrays, copying metricValue's mask. + # Set up new metricBundle's metric_values masked arrays, + # copying metricValue's mask. newmetric_bundle.metric_values = ma.MaskedArray( data=np.empty(len(self.slicer), "float"), mask=self.metric_values.mask.copy(), @@ -726,24 +764,29 @@ def reduce_metric( def plot(self, plot_handler=None, plot_func=None, outfile_suffix=None, savefig=False): """ - Create all plots available from the slicer. plotHandler holds the output directory info, etc. + Create all plots available from the slicer. + plotHandler holds the output directory info, etc. Parameters ---------- - plot_handler : Optional[PlotHandler] - The plot_handler saves the output location and results_db connection for a set of plots. - plot_func : Optional[BasePlotter] - Any plotter function. If not specified, the plotters in self.plotFuncs will be used. - outfile_suffix : Optional[str] + plot_handler : `~maf.plots.plot_handler`, opt + The plot_handler saves the output location + and results_db connection for a set of plots. + plot_func : `maf.plots.BasePlotter`, opt + Any plotter function. + If not specified, the plotters in self.plotFuncs will be used. + outfile_suffix : `str`, opt Optional string to append to the end of the plot output files. Useful when creating sequences of images for movies.
- savefig : Optional[bool] - Flag indicating whether or not to save the figure to disk. Default is False. + savefig : `bool`, opt + Flag indicating whether or not to save the figure to disk. + Default is False. Returns ------- - dict - Dictionary of plot_type:figure number key/value pairs, indicating what plots were created + made_plots : `dict` + Dictionary of plot_type:figure number key/value pairs, + indicating what plots were created and what matplotlib figure numbers were used. """ # Generate a plot_handler if none was set. diff --git a/rubin_sim/maf/metric_bundles/metric_bundle_group.py b/rubin_sim/maf/metric_bundles/metric_bundle_group.py index 40945ecdf..a22923d0b 100644 --- a/rubin_sim/maf/metric_bundles/metric_bundle_group.py +++ b/rubin_sim/maf/metric_bundles/metric_bundle_group.py @@ -18,14 +18,15 @@ def make_bundles_dict_from_list(bundle_list): - """Utility to convert a list of MetricBundles into a dictionary, keyed by the fileRoot names. + """Utility to convert a list of MetricBundles into a dictionary, + keyed by the fileRoot names. Raises an exception if the fileroot duplicates another metricBundle. (Note this should alert to potential cases of filename duplication). Parameters ---------- - bundle_list : `list` of `MetricBundles` + bundle_list : `list` [`MetricBundles`] """ b_dict = {} for b in bundle_list: @@ -36,47 +37,57 @@ def make_bundles_dict_from_list(bundle_list): class MetricBundleGroup: - """The MetricBundleGroup exists to calculate the metric values for a group of - MetricBundles. + """Calculate all values for a group of MetricBundles. - The MetricBundleGroup will query data from a single database table (for multiple - constraints), use that data to calculate metric values for multiple slicers, - and calculate summary statistics and generate plots for all metrics included in + The MetricBundleGroup will query data from a single database table + (for multiple constraints), use that data to calculate metric values + for multiple slicers, and calculate summary statistics and + generate plots for all metrics included in the dictionary passed to the MetricBundleGroup. - We calculate the metric values here, rather than in the individual MetricBundles, - because it is much more efficient to step through a slicer once (and calculate all - the relevant metric values at each point) than it is to repeat this process multiple times. + We calculate the metric values here, rather than in the + individual MetricBundles, because it is much more efficient to step + through a slicer once (and calculate all the relevant metric values + at each point) than it is to repeat this process multiple times. - The MetricBundleGroup also determines how to efficiently group the MetricBundles - to reduce the number of sql queries of the database, grabbing larger chunks of data at once. + The MetricBundleGroup also determines how to efficiently group + the MetricBundles to reduce the number of sql queries of the database, + grabbing larger chunks of data at once. Parameters ---------- - bundle_dict : `dict` or `list` of `MetricBundles` - Individual MetricBundles should be placed into a dictionary, and then passed to - the MetricBundleGroup. The dictionary keys can then be used to identify MetricBundles - if needed -- and to identify new MetricBundles which could be created if 'reduce' - functions are run on a particular MetricBundle. 
- A bundle_dict can be conveniently created from a list of MetricBundles using - makeBundlesDictFromList (done automatically if a list is passed in) + bundle_dict : `dict` or `list` [`MetricBundles`] + Individual MetricBundles should be placed into a dictionary, + and then passed to the MetricBundleGroup. + The dictionary keys can then be used to identify MetricBundles + if needed -- and to identify new MetricBundles which could be + created if 'reduce' functions are run on a particular MetricBundle. + A bundle_dict can be conveniently created from a list of MetricBundles + using makeBundlesDictFromList (done automatically if a list is passed). db_con : `str` or database connection object - A str that is the path to a sqlite3 file or a database object that can be used by pandas.read_sql. - Advanced use: It is possible to set this to None, in which case data should be passed - directly to the runCurrent method (and runAll should not be used). - out_dir : `str`, optional + A str that is the path to a sqlite3 file or a database object + that can be used by pandas.read_sql. + Advanced use: It is possible to set this to None, in which case + data should be passed directly to the runCurrent method + (and runAll should not be used). + out_dir : `str`, opt Directory to save the metric results. Default is the current directory. - results_db : `ResultsDb`, optional - A results database. If not specified, one will be created in the out_dir. - This database saves information about the metrics calculated, including their summary statistics. - verbose : `bool`, optional + results_db : `ResultsDb`, opt + A results database to store summary stat information. + If not specified, one will be created in the out_dir. + This database saves information about the metrics calculated, + including their summary statistics. + verbose : `bool`, opt Flag to turn on/off verbose feedback. - save_early : `bool`, optional - If True, metric values will be saved immediately after they are first calculated (to prevent - data loss) as well as after summary statistics are calculated. - If False, metric values will only be saved after summary statistics are calculated. - db_table : `str`, optional + save_early : `bool`, opt + If True, metric values will be saved immediately after + they are first calculated (to prevent data loss) as well as after + summary statistics are calculated. + If False, metric values will only be saved after summary statistics + are calculated. + db_table : `str`, opt The name of the table in the db_obj to query for data. + For modern opsim outputs, this table is `observations` (default None). """ def __init__( @@ -90,7 +101,7 @@ def __init__( db_table=None, ): """Set up the MetricBundleGroup.""" - if type(bundle_dict) is list: + if isinstance(bundle_dict, list): bundle_dict = make_bundles_dict_from_list(bundle_dict) # Print occasional messages to screen. self.verbose = verbose @@ -129,19 +140,21 @@ def __init__( def _check_compatible(self, metric_bundle1, metric_bundle2): """Check if two MetricBundles are "compatible". - Compatible indicates that the sql constraints, the slicers, and the maps are the same, and + + Compatible indicates that the sql constraints, the slicers, + and the maps are the same, and that the stackers do not interfere with each other (i.e. are not trying to set the same column in different ways). Returns True if the MetricBundles are compatible, False if not. 
Parameters ---------- - metric_bundle1 : MetricBundle - metric_bundle2 : MetricBundle + metric_bundle1 : `MetricBundle` + metric_bundle2 : `MetricBundle` Returns ------- - bool + match : `bool` """ if metric_bundle1.constraint != metric_bundle2.constraint: return False @@ -151,7 +164,8 @@ def _check_compatible(self, metric_bundle1, metric_bundle2): return False for stacker in metric_bundle1.stacker_list: for stacker2 in metric_bundle2.stacker_list: - # If the stackers have different names, that's OK, and if they are identical, that's ok. + # If the stackers have different names, that's OK, + # and if they are identical, that's ok. if (stacker.__class__.__name__ == stacker2.__class__.__name__) & (stacker != stacker2): return False # But if we got this far, everything matches. @@ -160,8 +174,8 @@ def _check_compatible(self, metric_bundle1, metric_bundle2): def _find_compatible_lists(self): """Find sets of compatible metricBundles from the currentBundleDict.""" # CompatibleLists stores a list of lists; - # each (nested) list contains the bundleDict _keys_ of a compatible set of metricBundles. - # + # each (nested) list contains the bundleDict _keys_ + # of a compatible set of metricBundles. compatible_lists = [] for k, b in self.current_bundle_dict.items(): found_compatible = False @@ -169,16 +183,20 @@ def _find_compatible_lists(self): comparison_metric_bundle_key = compatible_list[0] compatible = self._check_compatible(self.bundle_dict[comparison_metric_bundle_key], b) if compatible: - # Must compare all metricBundles in each subset (if they are a potential match), - # as the stackers could be different (and one could be incompatible, + # Must compare all metricBundles in each subset + # (if they are a potential match), + # as the stackers could be different + # (and one could be incompatible, # not necessarily the first) for comparison_metric_bundle_key in compatible_list[1:]: compatible = self._check_compatible(self.bundle_dict[comparison_metric_bundle_key], b) if not compatible: - # If we find one which is not compatible, stop and go on to the + # If we find one which is not compatible, + # stop and go on to the # next subset list. break - # Otherwise, we reached the end of the subset and they were all compatible. + # Otherwise, we reached the end of the subset + # and they were all compatible. found_compatible = True compatible_list.append(k) if not found_compatible: @@ -191,23 +209,25 @@ def _find_compatible_lists(self): self.compatible_lists = compatible_lists def run_all(self, clear_memory=False, plot_now=False, plot_kwargs=None): - """Runs all the metricBundles in the metricBundleGroup, over all constraints. + """Runs all the metricBundles in the metricBundleGroup, + over all constraints. - Calculates metric values, then runs reduce functions and summary statistics for - all MetricBundles. + Calculates metric values, then runs reduce functions and summary + statistics for all MetricBundles. Parameters ---------- clear_memory : `bool`, optional - If True, deletes metric values from memory after running each constraint group. + If True, deletes metric values from memory after running + each constraint group. plot_now : `bool`, optional If True, plots the metric values immediately after calculation. plot_kwargs : `bool`, optional kwargs to pass to plotCurrent. """ for constraint in self.constraints: - # Set the 'currentBundleDict' which is a dictionary of the metricBundles which match this - # constraint. 
+ # Set the 'currentBundleDict' which is a dictionary of the + # metricBundles which match this constraint. self.run_current( constraint, clear_memory=clear_memory, @@ -216,13 +236,15 @@ def run_all(self, clear_memory=False, plot_now=False, plot_kwargs=None): ) def set_current(self, constraint): - """Utility to set the currentBundleDict (i.e. a set of metricBundles with the same SQL constraint). + """Utility to set the currentBundleDict + (i.e. a set of metricBundles with the same SQL constraint). Parameters ---------- constraint : `str` - The subset of MetricBundles with metricBundle.constraint == constraint will be - included in a subset identified as the currentBundleDict. + The subset of MetricBundles with metricBundle.constraint == + constraint will be included in a subset identified as the + currentBundleDict. These are the active metrics to be calculated and plotted, etc. """ if constraint is None: @@ -256,17 +278,17 @@ def run_current( ---------- constraint : `str` constraint to use to set the currently active metrics - sim_data : `numpy.ndarray`, optional + sim_data : `np.ndarray`, opt If simData is not None, then this numpy structured array is used instead of querying data from the dbObj. - clear_memory : `bool`, optional + clear_memory : `bool`, opt If True, metric values are deleted from memory after they are calculated (and saved to disk). - plot_now : `bool`, optional + plot_now : `bool`, opt Plot immediately after calculating metric values (instead of the usual procedure, which is to plot after metric values are calculated for all constraints). - plot_kwargs : kwargs, optional + plot_kwargs : kwargs, opt Plotting kwargs to pass to plotCurrent. """ self.set_current(constraint) @@ -371,10 +393,11 @@ def _run_compatible(self, compatible_list): """Runs a set of 'compatible' metricbundles in the MetricBundleGroup dictionary identified by 'compatible_list' keys. - A compatible list of MetricBundles is a subset of the currentBundleDict. + A compatible list of MetricBundles is a subset of the + currentBundleDict. The currentBundleDict == set of MetricBundles with the same constraint. The compatibleBundles == set of MetricBundles with the same constraint, - the same slicer, the same maps applied to the slicer, + AND the same slicer, the same maps applied to the slicer, and stackers which do not clobber each other's data. This is where the work of calculating the metric values is done. @@ -541,12 +564,15 @@ def reduce_current(self, update_summaries=True): # Create a temporary dictionary to hold the reduced metricbundles. reduce_bundle_dict = {} for b in self.current_bundle_dict.values(): - # If there are no reduce functions associated with the metric, skip this metricBundle. + # If there are no reduce functions associated with the metric, + # skip this metricBundle. if len(b.metric.reduce_funcs) > 0: - # Apply reduce functions, creating a new metricBundle in the process (new metric values). + # Apply reduce functions, creating a new metricBundle in + # the process (new metric values). for r in b.metric.reduce_funcs: newmetricbundle = b.reduce_metric(b.metric.reduce_funcs[r], reduce_func_name=r) - # Add the new metricBundle to our metricBundleGroup dictionary. + # Add the new metricBundle to our metricBundleGroup + # dictionary. name = newmetricbundle.metric.name if name in self.bundle_dict: name = newmetricbundle.file_root @@ -560,7 +586,8 @@ def reduce_current(self, update_summaries=True): b.summary_metrics = [] # Add the new metricBundles to the MetricBundleGroup dictionary.
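Taken together, these hunks describe the standard group workflow: query once per constraint, evaluate all compatible bundles, then run reduces and summary statistics. A sketch of that end-to-end use, assuming a local opsim sqlite file (the `"baseline.db"` path and bundle choices are hypothetical):

```python
from rubin_sim.maf.metrics import MedianMetric
from rubin_sim.maf.slicers import HealpixSlicer
from rubin_sim.maf.metric_bundles import MetricBundle, MetricBundleGroup

bundle = MetricBundle(
    MedianMetric(col="fiveSigmaDepth"),
    HealpixSlicer(nside=64),
    'filter="i"',
)
# db_table="observations" per the docstring note added in this patch.
group = MetricBundleGroup(
    {"median_m5_i": bundle}, "baseline.db",
    out_dir="maf_out", db_table="observations",
)
group.run_all()  # one query per constraint, then reduces + summary stats
```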
self.bundle_dict.update(reduce_bundle_dict) - # And add to to the currentBundleDict too, so we run as part of 'summaryCurrent'. + # And add to the currentBundleDict too, so we run as part + # of 'summaryCurrent'. self.current_bundle_dict.update(reduce_bundle_dict) def summary_all(self): @@ -575,7 +602,8 @@ def summary_all(self): def summary_current(self): """Run summary statistics on all the metricBundles in the - currently active set of MetricBundles.""" + currently active set of MetricBundles. + """ for b in self.current_bundle_dict.values(): b.compute_summary_stats(self.results_db) @@ -692,7 +720,8 @@ def plot_current( def write_all(self): """Save all the MetricBundles to disk. - Saving all MetricBundles to disk at this point assumes that clearMemory was False. + Saving all MetricBundles to disk at this point assumes that + clearMemory was False. """ for constraint in self.constraints: self.set_current(constraint) diff --git a/rubin_sim/maf/metric_bundles/mo_metric_bundle.py b/rubin_sim/maf/metric_bundles/mo_metric_bundle.py index 3dba0b7c2..17f9c8084 100644 --- a/rubin_sim/maf/metric_bundles/mo_metric_bundle.py +++ b/rubin_sim/maf/metric_bundles/mo_metric_bundle.py @@ -26,36 +26,41 @@ def create_empty_mo_metric_bundle(): Returns ------- ~rubin_sim.maf.metricBundles.MoMetricBundle - An empty metric bundle, configured with just the :class:`BaseMetric` and :class:`BaseSlicer`. + An empty metric bundle, configured with just + the :class:`BaseMetric` and :class:`BaseSlicer`. """ return MoMetricBundle(BaseMoMetric(), MoObjSlicer(), None) def make_completeness_bundle(bundle, completeness_metric, h_mark=None, results_db=None): - """ - Make a mock metric bundle from a bundle which had MoCompleteness or MoCumulativeCompleteness summary - metrics run. This lets us use the plotHandler + plots.MetricVsH to generate plots. - Will also work with completeness metric run in order to calculate fraction of the population, - or with MoCompletenessAtTime metric. + """Make a mock metric bundle from a bundle which had + MoCompleteness or MoCumulativeCompleteness summary + metrics run. This lets us use the plotHandler + plots.MetricVsH + to generate plots. + Will also work with completeness metric run in order to calculate + fraction of the population, or with MoCompletenessAtTime metric. Parameters ---------- - bundle : ~rubin_sim.maf.metricBundles.MetricBundle + bundle : `~rubin_sim.maf.metricBundles.MetricBundle` The metric bundle with a completeness summary statistic. - completeness_metric : ~rubin_sim.maf.metric + completeness_metric : `~rubin_sim.maf.metric` The summary (completeness) metric to run on the bundle. - h_mark : float, optional - The Hmark value to add to the plotting dictionary of the new mock bundle. Default None. - results_db : ~rubin_sim.maf.db.ResultsDb, optional - The results_db in which to record the summary statistic value at Hmark. Default None. + h_mark : `float`, optional + The Hmark value to add to the plotting dictionary of the new + mock bundle. Default None. + results_db : `~rubin_sim.maf.db.ResultsDb`, optional + The results_db in which to record the summary statistic value at + Hmark. Default None. Returns ------- - ~rubin_sim.maf.metricBundles.MoMetricBundle + mo_metric_bundle : `~rubin_sim.maf.metricBundles.MoMetricBundle` """ bundle.set_summary_metrics(completeness_metric) - # This step adds summary values at each point to the original metric - we use this to populate - # the completeness values in the next step. However, we may not want them to go into the results_db.
+ # This step adds summary values at each point to the original metric - + # we use this to populate the completeness values in the next step. + # However, we may not want them to go into the results_db. bundle.compute_summary_stats(results_db) summary_name = completeness_metric.name # Make up the bundle, including the metric values. @@ -109,9 +114,6 @@ def __init__( child_metrics=None, summary_metrics=None, ): - """ - Instantiate moving object metric bundle, save metric/slicer/constraint, etc. - """ self.metric = metric self.slicer = slicer if constraint == "": @@ -187,7 +189,8 @@ def _reset_metric_bundle(self): self.summary_values = None def _build_metadata(self, info_label): - """If no info_label is provided, auto-generate it from the obs_file + constraint.""" + """If no info_label is provided, auto-generate it from the + obs_file + constraint.""" if info_label is None: try: self.info_label = self.slicer.obsfile.replace(".txt", "").replace(".dat", "") @@ -207,7 +210,8 @@ def _find_req_cols(self): def set_child_bundles(self, child_metrics=None): """ Identify any child metrics to be run on this (parent) bundle. - and create the new metric bundles that will hold the child values, linking to this bundle. + and create the new metric bundles that will hold the child values, + linking to this bundle. Remove the summaryMetrics from self afterwards. """ self.child_bundles = {} @@ -232,12 +236,13 @@ def set_child_bundles(self, child_metrics=None): def compute_summary_stats(self, results_db=None): """ - Compute summary statistics on metric_values, using summaryMetrics, for self and child bundles. + Compute summary statistics on metric_values, using summaryMetrics, + for self and child bundles. """ if self.summary_values is None: self.summary_values = {} if self.summary_metrics is not None: - # Build array of metric values, to use for (most) summary statistics. + # Build array of metric values, to use for summary statistics. for m in self.summary_metrics: summary_name = m.name summary_val = m.run(self.metric_values, self.slicer.slice_points["H"]) @@ -280,19 +285,20 @@ def __init__(self, bundle_dict, out_dir=".", results_db=None, verbose=True): def _check_compatible(self, metric_bundle1, metric_bundle2): """Check if two MetricBundles are "compatible". - Compatible indicates that the constraints, the slicers, and the maps are the same, and + Compatible indicates that the constraints, the slicers, + and the maps are the same, and that the stackers do not interfere with each other (i.e. are not trying to set the same column in different ways). Returns True if the MetricBundles are compatible, False if not. Parameters ---------- - metric_bundle1 : MetricBundle - metric_bundle2 : MetricBundle + metric_bundle1 : `MetricBundle` + metric_bundle2 : `MetricBundle` Returns ------- - bool + match : `bool` """ if metric_bundle1.constraint != metric_bundle2.constraint: return False @@ -302,24 +308,29 @@ def _check_compatible(self, metric_bundle1, metric_bundle2): return False for stacker in metric_bundle1.stacker_list: for stacker2 in metric_bundle2.stacker_list: - # If the stackers have different names, that's OK, and if they are identical, that's ok. + # If the stackers have different names, that's OK, + # and if they are identical, that's ok. if (stacker.__class__.__name__ == stacker2.__class__.__name__) & (stacker != stacker2): return False # But if we got this far, everything matches. 
return True def _find_compatible(self, test_keys): - """ "Private utility to find which metricBundles with keys in the list 'test_keys' can be calculated - at the same time -- having the same slicer, constraint, maps, and compatible stackers. + """Private utility to find which metricBundles with keys in the + list 'test_keys' can be calculated + at the same time -- having the same slicer, constraint, maps, + and compatible stackers. Parameters ----------- - test_keys : list - List of the dictionary keys (of self.bundle_dict) to test for compatibilility. + test_keys : `list` + List of the dictionary keys (of self.bundle_dict) to + test for compatibility. Returns -------- list of lists - Returns test_keys, split into separate lists of compatible metricBundles. + Returns test_keys, split into separate lists of + compatible metricBundles. """ compatible_lists = [] for k in test_keys: @@ -334,14 +345,17 @@ def _find_compatible(self, test_keys): found_compatible = False checked_all = False while not (found_compatible) and not (checked_all): - # Go through the existing lists in compatible_lists, to see if this metricBundle matches. + # Go through the existing lists in compatible_lists, to see + # if this metricBundle matches. for compatible_list in compatible_lists: - # Compare to all the metricBundles in this subset, to check all stackers are compatible. + # Compare to all the metricBundles in this subset, + # to check all stackers are compatible. found_compatible = True for comparison_key in compatible_list: compatible = self._check_compatible(self.bundle_dict[comparison_key], b) if not compatible: - # Found a metricBundle which is not compatible, so stop and go onto the next subset. + # Found a metricBundle which is not compatible, + # so stop and go onto the next subset. found_compatible = False break checked_all = True @@ -356,14 +370,17 @@ def _find_compatible(self, test_keys): return compatible_lists def run_constraint(self, constraint): - """Calculate the metric values for all the metricBundles which match this constraint in the - metricBundleGroup. Also calculates child metrics and summary statistics, and writes all to disk. - (work is actually done in _runCompatible, so that only completely compatible sets of metricBundles + """Calculate the metric values for all the metricBundles which + match this constraint in the metricBundleGroup. + Also calculates child metrics and summary statistics, + and writes all to disk. + (work is actually done in _runCompatible, so that only completely + compatible sets of metricBundles run at the same time). Parameters ---------- - constraint : str + constraint : `str` SQL-where or pandas constraint for the metricBundles. """ # Find the dict keys of the bundles which match this constraint. @@ -376,7 +393,8 @@ def run_constraint(self, constraint): # Identify the observations which are relevant for this constraint. # This sets slicer.obs (valid for all H values). self.slicer.subset_obs(constraint) - # Identify the sets of these metricBundles can be run at the same time (also have the same stackers). + # Identify the sets of these metricBundles that can be run at the + # same time (also have the same stackers). compatible_lists = self._find_compatible(keys_matching_constraint) # And now run each of those subsets of compatible metricBundles.
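The grouping logic reflowed above is greedy: each bundle joins the first subset whose members are all pairwise compatible with it, otherwise it starts a new subset. A condensed, self-contained sketch of that strategy (the toy compatibility check is hypothetical):

```python
# Condensed sketch of the _find_compatible grouping strategy above.
def find_compatible(keys, check_compatible):
    compatible_lists = []
    for k in keys:
        for subset in compatible_lists:
            # join the first subset where every member is compatible
            if all(check_compatible(other, k) for other in subset):
                subset.append(k)
                break
        else:
            # no compatible subset found; start a new one
            compatible_lists.append([k])
    return compatible_lists

# Toy check: "compatible" when two keys share a first letter.
print(find_compatible(["ax", "ay", "bz"], lambda a, b: a[0] == b[0]))
# [['ax', 'ay'], ['bz']]
```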
@@ -384,22 +402,24 @@ def run_constraint(self, constraint): self._run_compatible(compatible_list) def _run_compatible(self, compatible_list): - """Calculate the metric values for set of (parent and child) bundles, as well as the summary stats, - and write to disk. + """Calculate the metric values for set of (parent and child) bundles, + as well as the summary stats, and write to disk. Parameters ----------- - compatible_list : list - List of dictionary keys, of the metricBundles which can be calculated together. - This means they are 'compatible' and have the same slicer, constraint, and non-conflicting - mappers and stackers. + compatible_list : `list` + List of dictionary keys, of the metricBundles which can be + calculated together. This means they are 'compatible' and have + the same slicer, constraint, and non-conflicting mappers and + stackers. """ if self.verbose: print("Running metrics %s" % compatible_list) - b_dict = self.bundle_dict # {key: self.bundle_dict.get(key) for key in compatible_list} + b_dict = self.bundle_dict - # Find the unique stackers and maps. These are already "compatible" (as id'd by compatible_list). + # Find the unique stackers and maps. + # These are already "compatible" (as id'd by compatible_list). uniq_stackers = [] all_stackers = [] uniq_maps = [] @@ -435,12 +455,14 @@ def _run_compatible(self, compatible_list): # Run all the parent metrics. for k in compatible_list: b = self.bundle_dict[k] - # Mask the parent metric (and then child metrics) if there was no data. + # Mask the parent metric (and then child metrics) + # if there was no data. if len(sso_obs) == 0: b.metric_values.mask[i][j] = True for cb in list(b.child_bundles.values()): cb.metric_values.mask[i][j] = True - # Otherwise, calculate the metric value for the parent, and then child. + # Otherwise, calculate the metric value for the parent, + # and then child. else: # Calculate for the parent. m_val = b.metric.run(sso_obs, slice_point["orbit"], Hval) @@ -449,7 +471,8 @@ def _run_compatible(self, compatible_list): b.metric_values.mask[i][j] = True for cb in b.child_bundles.values(): cb.metric_values.mask[i][j] = True - # Otherwise, set the parent value and calculate the child metric values as well. + # Otherwise, set the parent value and calculate + # the child metric values as well. else: b.metric_values.data[i][j] = m_val for cb in b.child_bundles.values(): @@ -469,9 +492,7 @@ def _run_compatible(self, compatible_list): b.write(out_dir=self.out_dir, results_db=self.results_db) def run_all(self): - """ - Run all constraints and metrics for these moMetricBundles. - """ + """Run all constraints and metrics for these moMetricBundles.""" for constraint in self.constraints: self.run_constraint(constraint) if self.verbose: @@ -487,7 +508,11 @@ def plot_all( closefigs=True, ): """ - Make a few generically desired plots. This needs more flexibility in the future. + Make a few generically desired plots. + Given the nature of the outputs for much of the moving object + metrics, a good deal of the plotting for the moving object batch + is handled in a custom manner joining together multiple + metricsbundles. 
""" plot_handler = PlotHandler( out_dir=self.out_dir, diff --git a/rubin_sim/maf/metrics/__init__.py b/rubin_sim/maf/metrics/__init__.py index 22c210eed..3c39eb153 100644 --- a/rubin_sim/maf/metrics/__init__.py +++ b/rubin_sim/maf/metrics/__init__.py @@ -17,7 +17,6 @@ from .hourglass_metric import * from .incremental_template_metric import * from .kuiper_metrics import * -from .long_gap_agn_metric import * from .mo_metrics import * from .mo_summary_metrics import * from .night_pointing_metric import * @@ -30,7 +29,6 @@ from .season_metrics import * from .simple_metrics import * from .sky_sat_metric import * -from .slew_metrics import * from .sn_cadence_metric import * from .sn_n_sn_metric import * from .sn_sl_metric import * diff --git a/rubin_sim/maf/metrics/agn_time_lag_metric.py b/rubin_sim/maf/metrics/agn_time_lag_metric.py index 12f368a37..b870f7dc3 100644 --- a/rubin_sim/maf/metrics/agn_time_lag_metric.py +++ b/rubin_sim/maf/metrics/agn_time_lag_metric.py @@ -50,7 +50,8 @@ def __init__( **kwargs, ) - # Calculate NQUIST value for time-lag and sampling time (redshift is included in formula if desired) + # Calculate NQUIST value for time-lag and sampling time + # (redshift is included in formula if desired) def _get_nquist_value(self, caden, lag, z): return lag / ((1 + z) * caden) diff --git a/rubin_sim/maf/metrics/agnstructure.py b/rubin_sim/maf/metrics/agnstructure.py index 6f54d5db2..3b95142c4 100644 --- a/rubin_sim/maf/metrics/agnstructure.py +++ b/rubin_sim/maf/metrics/agnstructure.py @@ -12,31 +12,35 @@ class SFUncertMetric(BaseMetric): - """Structure Function (SF) Uncertainty Metric. Developed on top of LogTGaps + """Structure Function (SF) Uncertainty Metric. + Developed on top of LogTGaps Adapted from Weixiang Yu & Gordon Richards at: - https://github.com/RichardsGroup/LSST_SF_Metric/blob/main/notebooks/00_SFErrorMetric.ipynb + https://github.com/RichardsGroup/ + LSST_SF_Metric/blob/main/notebooks/00_SFErrorMetric.ipynb Parameters ---------- - mag: `float` (22) + mag : `float` The magnitude of the fiducial object. Default 22. - times_col: `str` ('observationStartMJD') + times_col : `str` Time column name. Defaults to "observationStartMJD". - all_gaps: `bool` (True) + all_gaps : `bool` Whether to use all gaps (between any two pairs of observations). If False, only use consecutive paris. Defaults to True. - units: `str` ('mag') + units : `str` Unit of this metric. Defaults to "mag". - bins: `object` - An array of bin edges. Defaults to "np.logspace(0, np.log10(3650), 16)" for a + bins : `object` + An array of bin edges. + Defaults to "np.logspace(0, np.log10(3650), 16)" for a total of 15 (final) bins. - weight: `object + weight : `object` The weight assigned to each delta_t bin for deriving the final metric. - Defaults to flat weighting with sum of 1. Should have length 1 less than bins. - snr_cut : float (5) + Defaults to flat weighting with sum of 1. + Should have length 1 less than bins. + snr_cut : `float` Ignore observations below an SNR limit, default 5. - dust : `bool` (True) + dust : `bool` Apply dust extinction to the fiducial object magnitude. Default True. 
""" @@ -115,15 +119,17 @@ def run(self, data_slice, slice_point=None): else: dts = np.diff(times) - # bin delta_t using provided bins; if zero pair found at any delta_t bin, - # replace 0 with 0.01 to avoid the exploding 1/sqrt(n) term in this metric + # bin delta_t using provided bins; + # if zero pair found at any delta_t bin, + # replace 0 with 0.01 to avoid the exploding 1/sqrt(n) term + # in this metric result, bins = np.histogram(dts, self.bins) new_result = np.where(result > 0, result, 0.01) # compute photometric_error^2 population variance and population mean # note that variance is replaced by median_absolute_deviate^2 - # mean is replaced by median in this implementation to make it robust to - # outliers in simulations (e.g., dcr simulations) + # mean is replaced by median in this implementation to make it robust + # to outliers in simulations (e.g., dcr simulations) err_var = mag_err**2 err_var_mu = np.median(err_var) err_var_std = mad_std(err_var) diff --git a/rubin_sim/maf/metrics/area_summary_metrics.py b/rubin_sim/maf/metrics/area_summary_metrics.py index 78119965d..1890953c8 100644 --- a/rubin_sim/maf/metrics/area_summary_metrics.py +++ b/rubin_sim/maf/metrics/area_summary_metrics.py @@ -8,21 +8,23 @@ class AreaSummaryMetric(BaseMetric): """ - Find the min/max of a value in the best area. This is a handy substitute for when - users want to know "the WFD value". + Find the min/max of a value over the area with the 'best' results + in the metric. + This is a handy substitute for when users want to know "the WFD value". Parameters ---------- - area : float (18000) + area : `float` The area to consider (sq degrees) - decreasing : bool (True) - Should the values be sorted by increasing or decreasing order. For values where - "larger is better", decreasing is probably what you want. For metrics where - "smaller is better" (e.g., astrometric precission), set decreasing to False. + decreasing : `bool` + Should the values be sorted by increasing or decreasing order. + For values where "larger is better", decreasing (True) is probably + what you want. For metrics where "smaller is better" + (e.g., astrometric precission), set decreasing to False. reduce_func : None - The function to reduce the clipped values by. Will default to min/max depending on - the bool val of the decreasing kwarg. - + The function to reduce the clipped values by. + Will default to min/max depending on the bool val of the decreasing + kwarg. """ def __init__( @@ -64,11 +66,12 @@ def run(self, data_slice, slice_point=None): class AreaThresholdMetric(BaseMetric): - """ - Find the amount of area on the sky that meets a given threshold value. + """Find the amount of area on the sky that meets a given threshold value. - The area per pixel is determined from the size of the metric_values array passed to the summary metric. - This assumes that both all values are passed and that the metric was calculated with a healpix slicer. + The area per pixel is determined from the size of the metric_values + array passed to the summary metric. + This assumes that both all values are passed and that the metric was + calculated with a healpix slicer. Parameters ---------- diff --git a/rubin_sim/maf/metrics/base_metric.py b/rubin_sim/maf/metrics/base_metric.py index bb6fb2b63..030c07aba 100644 --- a/rubin_sim/maf/metrics/base_metric.py +++ b/rubin_sim/maf/metrics/base_metric.py @@ -1,10 +1,4 @@ # Base class for metrics - defines methods which must be implemented. 
-# If a metric calculates a vector or list at each gridpoint, then there -# should be additional 'reduce_*' functions defined, to convert the vector -# into scalar (and thus plottable) values at each gridpoint. -# The philosophy behind keeping the vector instead of the scalar at each gridpoint -# is that these vectors may be expensive to compute; by keeping/writing the full -# vector we permit multiple 'reduce' functions to be executed on the same data. __all__ = ("MetricRegistry", "BaseMetric", "ColRegistry") @@ -114,8 +108,9 @@ class BaseMetric(metaclass=MetricRegistry): ---------- col : `str` or `list` [`str`] Names of the data columns that the metric will use. - The columns required for each metric is tracked in the ColRegistry, and used to retrieve data - from the opsim database. Can be a single string or a list. + The columns required for each metric is tracked in the ColRegistry, + and used to retrieve data from the opsim database. + Can be a single string or a list. metric_name : `str` Name to use for the metric (optional - if not set, will be derived). maps : `list` [`rubin_sim.maf.maps`] @@ -143,9 +138,11 @@ def __init__( badval=-666, mask_val=None, ): - # Turn cols into numpy array so we know we can iterate over the columns. + # Turn cols into numpy array so we know + # we can iterate over the columns. self.col_name_arr = np.array(col, copy=False, ndmin=1) - # To support simple metrics operating on a single column, set self.colname + # To support simple metrics operating on a single column, + # set self.colname if len(self.col_name_arr) == 1: self.colname = self.col_name_arr[0] # Add the columns to the colRegistry. @@ -161,7 +158,8 @@ def __init__( # Save a unique name for the metric. self.name = metric_name if self.name is None: - # If none provided, construct our own from the class name and the data columns. + # If none provided, construct our own from the class name + # and the data columns. self.name = ( self.__class__.__name__.replace("Metric", "", 1) + " " @@ -197,7 +195,7 @@ def __init__( # Default to only return one metric value per slice self.shape = 1 - def run(self, data_slice, slice_point=None): + def run(self, data_slice, slice_point): """Calculate metric values. Parameters @@ -207,11 +205,11 @@ def run(self, data_slice, slice_point=None): use to calculate metric values at each slice_point. slice_point : `dict` or None Dictionary of slice_point metadata passed to each metric. - E.g. the ra/dec of the healpix pixel or opsim fieldId. + E.g. the ra/dec of the healpix pixel. Returns ------- - metricValue: `int` `float` or `object` + metricValue : `int` `float` or `object` The metric value at each slice_point. """ raise NotImplementedError("Please implement your metric calculation.") diff --git a/rubin_sim/maf/metrics/brown_dwarf_metric.py b/rubin_sim/maf/metrics/brown_dwarf_metric.py index b652466c0..6fc299858 100644 --- a/rubin_sim/maf/metrics/brown_dwarf_metric.py +++ b/rubin_sim/maf/metrics/brown_dwarf_metric.py @@ -43,32 +43,42 @@ def bd_colors(spec_type): class BDParallaxMetric(BaseMetric): - """Calculate the distance to which one could reach a parallax SNR for a given object + """Calculate the distance to which one could reach a parallax SNR for a + given object + Modification of ParallaxMetric, illustrated in - https://github.com/jgizis/LSST-BD-Cadence/blob/main/bd_allLT_baseline_17.ipynb + https://github.com/jgizis/ + LSST-BD-Cadence/blob/main/bd_allLT_baseline_17.ipynb - Uses columns ra_pi_amp and dec_pi_amp, calculated by the ParallaxFactorStacker. 
+ Uses columns ra_pi_amp and dec_pi_amp, + calculated by the ParallaxFactorStacker. Parameters ---------- metricName : `str`, opt Default 'parallax'. m5_col : `str`, opt - The default column name for m5 information in the input data. Default fiveSigmaDepth. + The default column name for m5 information in the input data. + Default fiveSigmaDepth. filter_col : `str`, opt The column name for the filter information. Default filter. seeing_col : `str`, opt - The column name for the seeing information. Since the astrometry errors are based on the physical - size of the PSF, this should be the FWHM of the physical psf. Default seeingFwhmGeom. - mags : `dict` (None) - The absolute magnitude of the obeject in question. Keys of filter name, values in mags. + The column name for the seeing information. + Since the astrometry errors are based on the physical + size of the PSF, this should be the FWHM of the physical psf. + Default seeingFwhmGeom. + mags : `dict` or None + The absolute magnitude of the object in question. + Keys of filter name, values in mags. Defaults to an L7 spectral type if None. - distances : np.array + distances : `np.array`, (N,) Distances to try putting the object at (pc). atm_err : `float`, opt - The expected centroiding error due to the atmosphere, in arcseconds. Default 0.01. + The expected centroiding error due to the atmosphere, in arcseconds. + Default 0.01. badval : `float`, opt - The value to return when the metric value cannot be calculated. Default 0. + The value to return when the metric value cannot be calculated. + Default 0. """ def __init__( @@ -107,7 +117,8 @@ def __init__( def _final_sigma(self, position_errors, ra_pi_amp, dec_pi_amp): """Assume parallax in RA and DEC are fit independently, then combined. - All inputs assumed to be arcsec""" + All inputs assumed to be arcsec. + """ sigma_a = position_errors / ra_pi_amp sigma_b = position_errors / dec_pi_amp sigma_ra = np.sqrt(1.0 / np.sum(1.0 / sigma_a**2, axis=1)) @@ -139,7 +150,7 @@ def run(self, data_slice, slice_point=None): class VolumeSumMetric(BaseMetric): - """Compute the total volume assuming a metric has values of distance""" + """Compute the total volume assuming a metric has values of distance.""" def __init__(self, col=None, metric_name="VolumeSum", nside=None, **kwargs): super(VolumeSumMetric, self).__init__(col=col, metric_name=metric_name, **kwargs) diff --git a/rubin_sim/maf/metrics/cadence_metrics.py b/rubin_sim/maf/metrics/cadence_metrics.py index 85c5b8356..385d22a46 100644 --- a/rubin_sim/maf/metrics/cadence_metrics.py +++ b/rubin_sim/maf/metrics/cadence_metrics.py @@ -24,14 +24,17 @@ def __init__(self, filter_col="filter", metric_name="fS", **kwargs): super().__init__(cols=cols, metric_name=metric_name, units="fS", **kwargs) def run(self, data_slice, slice_point=None): - # We could import this from the m5_flat_sed values, but it makes sense to calculate the m5 - # directly from the throughputs. This is easy enough to do and will allow variation of + # We could import this from the m5_flat_sed values, + # but it makes sense to calculate the m5 + # directly from the throughputs. This is easy enough to do and + # will allow variation of # the throughput curves and readnoise and visit length, etc. 
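(The "calculate the m5 directly from the throughputs" remark in the hunk above refers to the standard LSST-style five-sigma-depth scaling. A minimal sketch of that relation, in the SMTN-002 form, follows; it is an illustration under assumed per-band fiducial coefficients cm and k_atm, not the rubin_sim implementation.)

import numpy as np


def m5_scaling(cm, m_sky, fwhm_eff, t_vis, k_atm, airmass):
    # Illustrative m5 scaling: brighter sky, worse seeing, shorter
    # visits, and higher airmass all reduce the five-sigma depth.
    return (
        cm
        + 0.50 * (m_sky - 21.0)
        + 2.5 * np.log10(0.7 / fwhm_eff)
        + 1.25 * np.log10(t_vis / 30.0)
        - k_atm * (airmass - 1.0)
    )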
pass class TemplateExistsMetric(BaseMetric): - """Calculate the fraction of images with a previous template image of desired quality.""" + """Calculate the fraction of images with a previous template + image of desired quality.""" def __init__( self, @@ -52,7 +55,8 @@ def run(self, data_slice, slice_point=None): data_slice.sort(order=self.observation_start_mjd_col) # Find the minimum seeing up to a given time seeing_mins = np.minimum.accumulate(data_slice[self.seeing_col]) - # Find the difference between the seeing and the minimum seeing at the previous visit + # Find the difference between the seeing and the minimum seeing + # at the previous visit seeing_diff = data_slice[self.seeing_col] - np.roll(seeing_mins, 1) # First image never has a template; check how many others do good = np.where(seeing_diff[1:] >= 0.0)[0] @@ -63,14 +67,16 @@ def run(self, data_slice, slice_point=None): class UniformityMetric(BaseMetric): """Calculate how uniformly the observations are spaced in time. - This is based on how a KS-test works: look at the cumulative distribution of observation dates, + This is based on how a KS-test works: + look at the cumulative distribution of observation dates, and compare to a perfectly uniform cumulative distribution. Perfectly uniform observations = 0, perfectly non-uniform = 1. Parameters ---------- mjd_col : `str`, optional - The column containing time for each observation. Default "observationStartMJD". + The column containing time for each observation. + Default "observationStartMJD". survey_length : `float`, optional The overall duration of the survey. Default 10. """ @@ -85,7 +91,8 @@ def run(self, data_slice, slice_point=None): # If only one observation, there is no uniformity if data_slice[self.mjd_col].size == 1: return 1 - # Scale dates to lie between 0 and 1, where 0 is the first observation date and 1 is surveyLength + # Scale dates to lie between 0 and 1, + # where 0 is the first observation date and 1 is surveyLength dates = (data_slice[self.mjd_col] - data_slice[self.mjd_col].min()) / (self.survey_length * 365.25) dates.sort() # Just to be sure n_cum = np.arange(1, dates.size + 1) / float(dates.size) @@ -96,7 +103,8 @@ def run(self, data_slice, slice_point=None): class GeneralUniformityMetric(BaseMetric): """Calculate how uniformly any values are distributed. - This is based on how a KS-test works: look at the cumulative distribution of data, + This is based on how a KS-test works: + look at the cumulative distribution of data, and compare to a perfectly uniform cumulative distribution. Perfectly uniform observations = 0, perfectly non-uniform = 1. To be "perfectly uniform" here, the endpoints need to be included. @@ -105,7 +113,8 @@ class GeneralUniformityMetric(BaseMetric): ---------- col : `str`, optional The column of data to use for the metric. - The default is "observationStartMJD" as this is most typically used with time. + The default is "observationStartMJD" as this is most + typically used with time. min_value : `float`, optional The minimum value expected for the data. 
Default None will calculate use the minimum value in this dataslice
@@ -117,7 +126,6 @@ class GeneralUniformityMetric(BaseMetric):
 """
 def __init__(self, col="observationStartMJD", units="", min_value=None, max_value=None, **kwargs):
-        """survey_length = time span of survey (years)"""
 self.col = col
 super().__init__(col=self.col, units=units, **kwargs)
 self.min_value = min_value
@@ -127,7 +135,8 @@ def run(self, data_slice, slice_point=None):
 # If only one observation, there is no uniformity
 if data_slice[self.col].size == 1:
 return 1
-        # Scale values to lie between 0 and 1, where 0 is the min_value and 1 is max_value
+        # Scale values to lie between 0 and 1,
+        # where 0 is the min_value and 1 is max_value
 if self.min_value is None:
 min_value = data_slice[self.col].min()
 else:
@@ -144,9 +153,11 @@ def run(self, data_slice, slice_point=None):
 class RapidRevisitUniformityMetric(BaseMetric):
-    """Calculate uniformity of time between consecutive visits on short timescales (for RAV1).
+    """Calculate uniformity of time between consecutive visits on
+    short timescales (for RAV1).
-    Uses a the same 'uniformity' calculation as the UniformityMetric, based on the KS-test.
+    Uses the same 'uniformity' calculation as the UniformityMetric,
+    based on the KS-test.
 A value of 0 is perfectly uniform; a value of 1 is purely non-uniform.
 Parameters
@@ -154,7 +165,8 @@ class RapidRevisitUniformityMetric(BaseMetric):
 mjd_col : `str`, optional
 The column containing the 'time' value. Default observationStartMJD.
 min_nvisits : `int`, optional
-        The minimum number of visits required within the time interval (d_tmin to d_tmax).
+        The minimum number of visits required within the
+        time interval (d_tmin to d_tmax).
 Default 100.
 d_tmin : `float`, optional
 The minimum dTime to consider (in days). Default 40 seconds.
@@ -176,7 +188,8 @@ def __init__(
 self.d_tmin = d_tmin
 self.d_tmax = d_tmax
 super().__init__(col=self.mjd_col, metric_name=metric_name, **kwargs)
-        # Update min_nvisits, as 0 visits will crash algorithm and 1 is nonuniform by definition.
+        # Update min_nvisits, as 0 visits will crash the algorithm
+        # and 1 is nonuniform by definition.
 if self.min_nvisits <= 1:
 self.min_nvisits = 2
@@ -230,16 +243,19 @@ def run(self, data_slice, slice_point=None):
 class NRevisitsMetric(BaseMetric):
-    """Calculate the number of consecutive visits with time differences less than d_t.
+    """Calculate the number of consecutive visits with
+    time differences less than d_t.
 Parameters
 ----------
 d_t : `float`, optional
 The time interval to consider (in minutes). Default 30.
 normed : `bool`, optional
-        Flag to indicate whether to return the total number of consecutive visits with time
-        differences less than d_t (False), or the fraction of overall visits (True).
-        Note that we would expect (if all visits occur in pairs within d_t) this fraction would be 0.5!
+        Flag to indicate whether to return the total number of
+        consecutive visits with time differences less than d_t (False),
+        or the fraction of overall visits (True).
+        Note that we would expect (if all visits occur in pairs within d_t)
+        this fraction would be 0.5!
 """
 def __init__(self, mjd_col="observationStartMJD", d_t=30.0, normed=False, metric_name=None, **kwargs):
@@ -267,12 +283,14 @@ def run(self, data_slice, slice_point=None):
 class IntraNightGapsMetric(BaseMetric):
 """
-    Calculate the (reduce_func) of the gap between consecutive observations within a night, in hours.
+    Calculate the (reduce_func) of the gap between consecutive
+    observations within a night, in hours.
Parameters
 ----------
 reduce_func : function, optional
-        Function that can operate on array-like structures. Typically numpy function.
+        Function that can operate on array-like structures.
+        Typically numpy function.
 Default np.median.
 """
@@ -306,12 +324,14 @@ def run(self, data_slice, slice_point=None):
 class InterNightGapsMetric(BaseMetric):
-    """Calculate the (reduce_func) of the gap between consecutive observations in different nights, in days.
+    """Calculate the (reduce_func) of the gap between consecutive
+    observations in different nights, in days.
 Parameters
 ----------
 reduce_func : function, optional
-        Function that can operate on array-like structures. Typically numpy function.
+        Function that can operate on array-like structures.
+        Typically numpy function.
 Default np.median.
 """
@@ -344,16 +364,18 @@ def run(self, data_slice, slice_point=None):
 class VisitGapMetric(BaseMetric):
-    """Calculate the (reduce_func) of the gap between any consecutive observations, in hours,
-    regardless of night boundaries.
+    """Calculate the (reduce_func) of the gap between any
+    consecutive observations, in hours, regardless of night boundaries.
-    Different from inter-night and intra-night gaps, between this is really just counting
-    all of the times between consecutive observations (not time between nights or time within a night).
+    Different from inter-night and intra-night gaps,
+    because this is really just counting all of the times between consecutive
+    observations (not time between nights or time within a night).
 Parameters
 ----------
 reduce_func : function, optional
-        Function that can operate on array-like structures. Typically numpy function.
+        Function that can operate on array-like structures.
+        Typically numpy function.
 Default np.median.
 """
diff --git a/rubin_sim/maf/metrics/calibration_metrics.py b/rubin_sim/maf/metrics/calibration_metrics.py
index 2c7484808..498f447de 100644
--- a/rubin_sim/maf/metrics/calibration_metrics.py
+++ b/rubin_sim/maf/metrics/calibration_metrics.py
@@ -97,11 +97,11 @@ def __init__(
 "This normalized version of the metric displays the "
 "estimated uncertainty in the parallax measurement, "
 )
-            self.comment += "divided by the minimum parallax uncertainty possible " "(if all visits were six "
-            self.comment += (
-                "months apart). Values closer to 1 indicate more optimal "
-                "scheduling for parallax measurement."
-            )
+            self.comment += "divided by the minimum parallax uncertainty possible "
+            self.comment += "(if all visits were six months apart). "
+            self.comment += "Values closer to 1 indicate more optimal "
+            self.comment += "scheduling for parallax measurement."
 def _final_sigma(self, position_errors, ra_pi_amp, dec_pi_amp):
 """Assume parallax in RA and DEC are fit independently, then combined.
@@ -127,7 +127,8 @@ def run(self, data_slice, slice_point=None):
 position_errors = mafUtils.astrom_precision(data_slice[self.seeing_col], snr, self.atm_err)
 sigma = self._final_sigma(position_errors, data_slice["ra_pi_amp"], data_slice["dec_pi_amp"])
 if self.normalize:
-            # Leave the dec parallax as zero since one can't have ra and dec maximized at the same time.
+            # Leave the dec parallax as zero since one can't have
+            # ra and dec maximized at the same time.
 sigma = (
 self._final_sigma(
 position_errors,
@@ -146,32 +147,41 @@ class ProperMotionMetric(BaseMetric):
 Parameters
 ----------
-    metricName : str, optional
+    metricName : `str`, optional
 Default 'properMotion'.
-    m5_col : str, optional
-        The default column name for m5 information in the input data. Default fiveSigmaDepth.
-    mjd_col : str, optional
+    m5_col : `str`, optional
+        The default column name for m5 information in the input data.
+        Default fiveSigmaDepth.
+    mjd_col : `str`, optional
 The column name for the exposure time. Default observationStartMJD.
-    filterCol : str, optional
+    filterCol : `str`, optional
 The column name for the filter information. Default filter.
-    seeing_col : str, optional
-        The column name for the seeing information. Since the astrometry errors are based on the physical
-        size of the PSF, this should be the FWHM of the physical psf. Default seeingFwhmGeom.
-    rmag : float, optional
-        The r magnitude of the fiducial star in r band. Other filters are sclaed using sedTemplate keyword.
+    seeing_col : `str`, optional
+        The column name for the seeing information.
+        Since the astrometry errors are based on the physical
+        size of the PSF, this should be the FWHM of the physical psf.
+        Default seeingFwhmGeom.
+    rmag : `float`, optional
+        The r magnitude of the fiducial star in r band.
+        Other filters are scaled using sedTemplate keyword.
 Default 20.0
-    SedTemplate : str, optional
-        The template to use. This can be 'flat' or 'O','B','A','F','G','K','M'. Default flat.
-    atm_err : float, optional
-        The expected centroiding error due to the atmosphere, in arcseconds. Default 0.01.
+    SedTemplate : `str`, optional
+        The template to use. This can be 'flat' or 'O','B','A','F','G','K','M'.
+        Default flat.
+    atm_err : `float`, optional
+        The expected centroiding error due to the atmosphere, in arcseconds.
+        Default 0.01.
 normalize : `bool`, optional
-        Compare the astrometric uncertainty to the uncertainty that would result if half the observations
-        were taken at the start and half at the end. A perfect survey will have a value close to 1, while
+        Compare the astrometric uncertainty to the uncertainty that would
+        result if half the observations were taken at the start and half
+        at the end. A perfect survey will have a value close to 1, while
 a poorly scheduled survey will be close to 0. Default False.
-    baseline : float, optional
-        The length of the survey used for the normalization, in years. Default 10.
-    badval : float, optional
-        The value to return when the metric value cannot be calculated. Default -666.
+    baseline : `float`, optional
+        The length of the survey used for the normalization, in years.
+        Default 10.
+    badval : `float`, optional
+        The value to return when the metric value cannot be calculated.
+        Default -666.
 """
 def __init__( @@ -225,8 +235,8 @@ def __init__(
 self.comment += (
 "This normalized version of the metric represents " "the estimated uncertainty in the proper "
 )
-            self.comment += "motion divided by the minimum uncertainty possible " "(if all visits were "
-            self.comment += "obtained on the first and last days of the survey). "
+            self.comment += "motion divided by the minimum uncertainty possible "
+            self.comment += "(if all visits were obtained on the first and last days of the survey). "
 self.comment += "Values closer to 1 indicate more optimal scheduling."
 def run(self, data_slice, slice_point=None):
@@ -257,50 +267,61 @@ def run(self, data_slice, slice_point=None):
 class ParallaxCoverageMetric(BaseMetric):
-    """
-    Check how well the parallax factor is distributed. Subtracts the weighted mean position of the
+    """Check how well the parallax factor is distributed.
+
+    Subtracts the weighted mean position of the
 parallax offsets, then computes the weighted mean radius of the points.
-    If points are well distributed, the mean radius will be near 1.
If phase coverage is bad,
-    radius will be close to zero.
+    If points are well distributed, the mean radius will be near 1.
+    If phase coverage is bad, radius will be close to zero.
-    For points on the Ecliptic, uniform sampling should result in a metric value of ~0.5.
+    For points on the Ecliptic, uniform sampling should result in a
+    metric value of ~0.5.
 At the poles, uniform sampling would result in a metric value of ~1.
-    Conceptually, it is helpful to remember that the parallax motion of a star at the pole is
-    a (nearly circular) ellipse while the motion of a star on the ecliptic is a straight line. Thus, any
-    pair of observations separated by 6 months will give the full parallax range for a star on the pole
-    but only observations on very specific dates will give the full range for a star on the ecliptic.
+    Conceptually, it is helpful to remember that the parallax motion of a
+    star at the pole is a (nearly circular) ellipse while the motion of a
+    star on the ecliptic is a straight line. Thus, any pair of observations
+    separated by 6 months will give the full parallax range for a star on
+    the pole but only observations on very specific dates will give the
+    full range for a star on the ecliptic.
-    Optionally also demand that there are observations above the snr_limit kwarg spanning theta_range radians.
+    Optionally also demand that there are observations above the snr_limit
+    kwarg spanning theta_range radians.
 Parameters
 ----------
-    m5_col: str, optional
+    m5_col : `str`, optional
 Column name for individual visit m5. Default fiveSigmaDepth.
-    mjd_col: str, optional
+    mjd_col : `str`, optional
 Column name for exposure time dates. Default observationStartMJD.
-    filter_col: str, optional
+    filter_col : `str`, optional
 Column name for filter. Default filter.
-    seeing_col: str, optional
+    seeing_col : `str`, optional
 Column name for seeing (assumed FWHM). Default seeingFwhmGeom.
-    rmag: float, optional
-        Magnitude of fiducial star in r filter. Other filters are scaled using sedTemplate keyword.
+    rmag : `float`, optional
+        Magnitude of fiducial star in r filter.
+        Other filters are scaled using sedTemplate keyword.
 Default 20.0
-    sedTemplate: str, optional
-        Template to use (can be 'flat' or 'O','B','A','F','G','K','M'). Default 'flat'.
-    atm_err: float, optional
-        Centroiding error due to atmosphere in arcsec. Default 0.01 (arcseconds).
-    theta_range: float, optional
-        Range of parallax offset angles to demand (in radians). Default=0 (means no range requirement).
-    snr_limit: float, optional
-        Only include points above the snr_limit when computing theta_range. Default 5.
+    sedTemplate : `str`, optional
+        Template to use (can be 'flat' or 'O','B','A','F','G','K','M').
+        Default 'flat'.
+    atm_err : `float`, optional
+        Centroiding error due to atmosphere in arcsec.
+        Default 0.01 (arcseconds).
+    theta_range : `float`, optional
+        Range of parallax offset angles to demand (in radians).
+        Default=0 (means no range requirement).
+    snr_limit : `float`, optional
+        Only include points above the snr_limit when computing theta_range.
+        Default 5.
 Returns
 --------
-    metricValue: float
+    metricValue : `float`
 Returns a weighted mean of the length of the parallax factor vectors.
 Values near 1 imply that the points are well distributed.
 Values near 0 imply that the parallax phase coverage is bad.
-        Near the ecliptic, uniform sampling results in metric values of about 0.5.
+        Near the ecliptic, uniform sampling results in metric values
+        of about 0.5.
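(A concrete reading of the statistic described above, as a hedged numpy sketch rather than the metric's actual code; the weights argument stands in for the SNR-based weighting and is an assumption here.)

import numpy as np


def parallax_coverage_sketch(ra_pi_amp, dec_pi_amp, weights):
    # Subtract the weighted mean position of the parallax offsets ...
    x = ra_pi_amp - np.average(ra_pi_amp, weights=weights)
    y = dec_pi_amp - np.average(dec_pi_amp, weights=weights)
    # ... then return the weighted mean radius of the points.
    return np.average(np.sqrt(x**2 + y**2), weights=weights)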
Notes
 -----
@@ -354,7 +375,7 @@ def _theta_check(self, ra_pi_amp, dec_pi_amp, snr):
 theta = theta - np.min(theta)
 result = 0.0
 if np.max(theta) >= self.theta_range:
-            # Check that things are in differnet quadrants
+            # Check that things are in different quadrants
 theta = (theta + np.pi) % 2.0 * np.pi
 theta = theta - np.min(theta)
 if np.max(theta) >= self.theta_range:
@@ -397,36 +418,42 @@ def run(self, data_slice, slice_point=None):
 class ParallaxDcrDegenMetric(BaseMetric):
-    """Use the full parallax and DCR displacement vectors to find if they are degenerate.
+    """Use the full parallax and DCR displacement vectors to find if they
+    are degenerate.
 Parameters
 ----------
-    metricName : str, optional
+    metricName : `str`, optional
 Default 'ParallaxDcrDegenMetric'.
-    seeing_col : str, optional
+    seeing_col : `str`, optional
 Default 'FWHMgeom'
-    m5_col : str, optional
+    m5_col : `str`, optional
 Default 'fiveSigmaDepth'
-    filter_col : str
+    filter_col : `str`
 Default 'filter'
-    atm_err : float
-        Minimum error in photometry centroids introduced by the atmosphere (arcseconds). Default 0.01.
-    rmag : float
+    atm_err : `float`
+        Minimum error in photometry centroids introduced by the atmosphere
+        (arcseconds). Default 0.01.
+    rmag : `float`
 r-band magnitude of the fiducual star that is being used (mag).
-    SedTemplate : str
-        The SED template to use for fiducia star colors, passed to rubin_scheduler.utils.stellarMags.
+    SedTemplate : `str`
+        The SED template to use for fiducial star colors,
+        passed to rubin_scheduler.utils.stellarMags.
 Default 'flat'
-    tol : float
-        Tolerance for how well curve_fit needs to work before believing the covariance result.
+    tol : `float`
+        Tolerance for how well curve_fit needs to work before
+        believing the covariance result.
 Default 0.05.
 Returns
 -------
-    metricValue : float
-        Returns the correlation coefficient between the best-fit parallax amplitude and DCR amplitude.
-        The RA and Dec offsets are fit simultaneously. Values close to zero are good, values close to +/- 1
-        are bad. Experience with fitting Monte Carlo simulations suggests the astrometric fits start
-        becoming poor around a correlation of 0.7.
+    metricValue : `float`
+        Returns the correlation coefficient between the best-fit parallax
+        amplitude and DCR amplitude.
+        The RA and Dec offsets are fit simultaneously.
+        Values close to zero are good, values close to +/- 1 are bad.
+        Experience with fitting Monte Carlo simulations suggests the
+        astrometric fits start becoming poor around a correlation of 0.7.
 """
 def __init__( @@ -465,39 +492,46 @@ def __init__(
 self.atm_err = atm_err
 def _positions(self, x, a, b):
-        """
-        Function to find parallax and dcr amplitudes
+        """Function to find parallax and dcr amplitudes
-        x should be a vector with [[parallax_x1, parallax_x2..., parallax_y1, parallax_y2...],
+        x should be a vector with [[parallax_x1, parallax_x2...,
+        parallax_y1, parallax_y2...],
 [dcr_x1, dcr_x2..., dcr_y1, dcr_y2...]]
 """
 result = a * x[0, :] + b * x[1, :]
 return result
 def run(self, data_slice, slice_point=None):
-        # The idea here is that we calculate position errors (in RA and Dec) for all observations.
-        # Then we generate arrays of the parallax offsets (delta RA parallax = ra_pi_amp, etc)
-        # and the DCR offsets (delta RA DCR = ra_dcr_amp, etc), and just add them together into one
-        # RA (and Dec) offset. Then, we try to fit for how we combined these offsets, but while
-        # considering the astrometric noise. If we can figure out that we just added them together
-        # (i.e.
the curve_fit result is [a=1, b=1] for the function _positions above) - # then we should be able to disentangle the parallax and DCR offsets when fitting 'for real'. + # The idea here is that we calculate position errors (in RA and Dec) + # for all observations. Then we generate arrays of the parallax + # offsets (delta RA parallax = ra_pi_amp, etc) and the DCR offsets + # (delta RA DCR = ra_dcr_amp, etc), and just add them together into one + # RA (and Dec) offset. Then, we try to fit for how we combined these + # offsets, but while considering the astrometric noise. If we can + # figure out that we just added them together + # (i.e. the curve_fit result is [a=1, b=1] for the function + # _positions above) then we should be able to disentangle the + # parallax and DCR offsets when fitting 'for real'. # compute SNR for all observations snr = np.zeros(len(data_slice), dtype="float") for filt in np.unique(data_slice[self.filter_col]): in_filt = np.where(data_slice[self.filter_col] == filt) snr[in_filt] = mafUtils.m52snr(self.mags[filt], data_slice[self.m5_col][in_filt]) # Compute the centroiding uncertainties - # Note that these centroiding uncertainties depend on the physical size of the PSF, thus - # we are using seeingFwhmGeom for these metrics, not seeingFwhmEff. + # Note that these centroiding uncertainties depend on the physical + # size of the PSF, thus we are using seeingFwhmGeom for these metrics, + # not seeingFwhmEff. position_errors = mafUtils.astrom_precision(data_slice[self.seeing_col], snr, self.atm_err) - # Construct the vectors of RA/Dec offsets. xdata is the "input data". ydata is the "output". + # Construct the vectors of RA/Dec offsets. xdata is the "input data". + # ydata is the "output". xdata = np.empty((2, data_slice.size * 2), dtype=float) xdata[0, :] = np.concatenate((data_slice["ra_pi_amp"], data_slice["dec_pi_amp"])) xdata[1, :] = np.concatenate((data_slice["ra_dcr_amp"], data_slice["dec_dcr_amp"])) ydata = np.sum(xdata, axis=0) - # Use curve_fit to compute covariance between parallax and dcr amplitudes - # Set the initial guess slightly off from the correct [1,1] to make sure it iterates. + # Use curve_fit to compute covariance between parallax + # and dcr amplitudes + # Set the initial guess slightly off from the correct [1,1] to + # make sure it iterates. popt, pcov = curve_fit( self._positions, xdata, @@ -511,7 +545,8 @@ def run(self, data_slice, slice_point=None): return self.badval # Covariance between best fit parallax amplitude and DCR amplitude. cov = pcov[1, 0] - # Convert covarience between parallax and DCR amplitudes to normalized correlation + # Convert covariance between parallax and DCR amplitudes to normalized + # correlation perr = np.sqrt(np.diag(pcov)) correlation = cov / (perr[0] * perr[1]) result = correlation @@ -522,13 +557,26 @@ def run(self, data_slice, slice_point=None): def calc_dist_cosines(ra1, dec1, ra2, dec2): - # Taken from simSelfCalib.py """Calculates distance on a sphere using spherical law of cosines. - Give this function RA/Dec values in radians. Returns angular distance(s), in radians. - Note that since this is all numpy, you could input arrays of RA/Decs.""" + Note: floats can be replaced by numpy arrays of RA/Dec. + For very small distances, rounding errors may cause distance errors. + + Parameters + ---------- + ra1, dec1 : `float`, `float` + RA and Dec of one point. (radians) + ra2, dec2 : `float`, `float` + RA and Dec of another point. 
(radians)
+
+    Returns
+    -------
+    distance : `float`
+        Angular distance between the points in radians.
+    """
 # This formula can have rounding errors for case where distances are small.
-    # Oh, the joys of wikipedia - http://en.wikipedia.org/wiki/Great-circle_distance
+    # Oh, the joys of wikipedia -
+    # http://en.wikipedia.org/wiki/Great-circle_distance
 # For the purposes of these calculations, this is probably accurate enough.
 D = np.sin(dec2) * np.sin(dec1) + np.cos(dec1) * np.cos(dec2) * np.cos(ra2 - ra1)
 D = np.arccos(D)
@@ -536,7 +584,9 @@
 class RadiusObsMetric(BaseMetric):
-    """find the radius in the focal plane. returns things in degrees."""
+    """Evaluate slice point radial position in the focal plane of each visit,
+    reducing to the mean, rms and full range of these radial distances.
+    """
 def __init__(
 self, metric_name="radiusObs", ra_col="fieldRA", dec_col="fieldDec", units="radians", **kwargs
diff --git a/rubin_sim/maf/metrics/coverage_metric.py b/rubin_sim/maf/metrics/coverage_metric.py
index 23ef98e7e..d20333680 100644
--- a/rubin_sim/maf/metrics/coverage_metric.py
+++ b/rubin_sim/maf/metrics/coverage_metric.py
@@ -6,22 +6,26 @@
 class YearCoverageMetric(BaseMetric):
-    """Count the number of bins covered by night_col -- default bins are 'years'.
-    Handy for checking that a point on the sky gets observed every year, as the default settings
-    result in the metric returning the number years in the data_slice (when used with a HealpixSlicer).
+    """Count the number of `bins` covered by night_col.
+
+    The default `bins` cover years 0 to 10.
+    Handy for checking that a point on the sky gets observed every year,
+    as the default settings result in the metric returning the number of years
+    in the data_slice (when used with a HealpixSlicer).
 Parameters
 ----------
-    night_col: str, optional
+    night_col : `str`, opt
 Data column to histogram. Default 'night'.
-    bins: numpy.ndarray, optional
-        Bins to use in the histogram. Default corresponds to years 0-10 (with 365.25 nights per year).
-    units: str, optional
+    bins : `np.ndarray`, (N,), opt
+        Bins to use in the histogram. Default corresponds to years 0-10
+        (with 365.25 nights per year).
+    units : `str`, opt
 Units to use for the metric result. Default 'N years'.
 Returns
 -------
-    integer
+    nbins : `int`
 Number of histogram bins where the histogram value is greater than 0.
 Typically this will be the number of years in the 'night_col'.
 """
diff --git a/rubin_sim/maf/metrics/crowding_metric.py b/rubin_sim/maf/metrics/crowding_metric.py
index ddb9e1fdf..4e3e7caff 100644
--- a/rubin_sim/maf/metrics/crowding_metric.py
+++ b/rubin_sim/maf/metrics/crowding_metric.py
@@ -6,33 +6,35 @@
 from rubin_sim.maf.metrics import BaseMetric
-# Modifying from Knut Olson's fork at:
-# https://github.com/knutago/sims_maf_contrib/blob/master/tutorials/CrowdingMetric.ipynb
+# Originally contributed by Knut Olson (@knutago).
 def _comp_crowd_error(mag_vector, lum_func, seeing, single_mag=None):
 """
-    Compute the photometric crowding error given the luminosity function and best seeing.
+    Compute the photometric crowding error given the luminosity
+    function and best seeing.
+
+    Equation from Olsen, Blum, & Rigaut 2003, AJ, 126, 452
 Parameters
 ----------
-    mag_vector : np.array
+    mag_vector : `np.array` (N,)
 Stellar magnitudes.
-    lum_func : np.array
+    lum_func : `np.array` (N,)
 Stellar luminosity function.
-    seeing : float
-        The best seeing conditions.
Assuming forced-photometry can use the best seeing conditions + seeing : `float` + The best seeing conditions. + Assuming forced-photometry can use the best seeing conditions to help with confusion errors. - single_mag : float (None) - If single_mag is None, the crowding error is calculated for each mag in mag_vector. If - single_mag is a float, the crowding error is interpolated to that single value. + single_mag : `float` or None + If single_mag is None, the crowding error is calculated + for each mag in mag_vector. If single_mag is a float, + the crowding error is interpolated to that single value. Returns ------- - np.array + mag_uncert : `np.array` (N,) Magnitude uncertainties. - - Equation from Olsen, Blum, & Rigaut 2003, AJ, 126, 452 """ lum_area_arcsec = 3600.0**2 lum_vector = 10 ** (-0.4 * mag_vector) @@ -47,7 +49,29 @@ def _comp_crowd_error(mag_vector, lum_func, seeing, single_mag=None): class CrowdingM5Metric(BaseMetric): - """Return the magnitude at which the photometric error exceeds crowding_error threshold.""" + """Calculate the magnitude at which the photometric error exceeds + the crowding error threshold. + + Parameters + ---------- + crowding_error : `float`, optional + The magnitude uncertainty from crowding in magnitudes. + Default 0.1 mags. + filtername : `str`, optional + The bandpass in which to calculate the crowding limit. Default r. + seeing_col : `str`, optional + The name of the seeing column. + m5Col : `str`, optional + The name of the m5 depth column. + maps : `list` [`str`], optional + Names of maps required for the metric. + + Returns + ------- + mag : `float` + The magnitude of a star which has a photometric error of + `crowding_error` + """ def __init__( self, @@ -58,26 +82,6 @@ def __init__( maps=["StellarDensityMap"], **kwargs, ): - """ - Parameters - ---------- - crowding_error : float, optional - The magnitude uncertainty from crowding in magnitudes. Default 0.1 mags. - filtername: str, optional - The bandpass in which to calculate the crowding limit. Default r. - seeing_col : str, optional - The name of the seeing column. - m5Col : str, optional - The name of the m5 depth column. - maps : list of str, optional - Names of maps required for the metric. - - Returns - ------- - float - The magnitude of a star which has a photometric error of `crowding_error` - """ - cols = [seeing_col] units = "mag" self.crowding_error = crowding_error @@ -88,13 +92,16 @@ def __init__( super().__init__(col=cols, maps=maps, units=units, metric_name=metric_name, **kwargs) def run(self, data_slice, slice_point=None): - # Set mag_vector to the same length as starLumFunc (lower edge of mag bins) + # Set mag_vector to the same length as starLumFunc + # (lower edge of mag bins) mag_vector = slice_point[f"starMapBins_{self.filtername}"][1:] # Pull up density of stars at this point in the sky lum_func = slice_point[f"starLumFunc_{self.filtername}"] - # Calculate the crowding error using the best seeing value (in any filter?) + # Calculate the crowding error using the best seeing value + # (in any filter?) 
crowd_error = _comp_crowd_error(mag_vector, lum_func, seeing=min(data_slice[self.seeing_col])) - # Locate at which point crowding error is greater than user-defined limit + # Locate at which point crowding error is greater than user-defined + # limit above_crowd = np.where(crowd_error >= self.crowding_error)[0] if np.size(above_crowd) == 0: @@ -107,8 +114,29 @@ def run(self, data_slice, slice_point=None): class NstarsMetric(BaseMetric): - """Return the number of stars visible above some uncertainty limit, - taking image depth and crowding into account. + """Calculate the number of stars detectable above some uncertainty + limit, taking image depth and crowding into account. + + Parameters + ---------- + crowding_error : `float`, opt + The magnitude uncertainty from crowding in magnitudes. + Default 0.1 mags. + filtername : `str`, opt + The bandpass in which to calculate the crowding limit. Default r. + seeing_col : `str`, opt + The name of the seeing column. + m5_col : `str`, opt + The name of the m5 depth column. + maps : `list` [`str`], opt + Names of maps required for the metric. + ignore_crowding : `bool`, opt + Ignore the crowding limit. + + Returns + ------- + nstars : `float` + The number of stars above the error limit. """ def __init__( @@ -122,28 +150,6 @@ def __init__( ignore_crowding=False, **kwargs, ): - """ - Parameters - ---------- - crowding_error : float, optional - The magnitude uncertainty from crowding in magnitudes. Default 0.1 mags. - filtername: str, optional - The bandpass in which to calculate the crowding limit. Default r. - seeing_col : str, optional - The name of the seeing column. - m5_col : str, optional - The name of the m5 depth column. - maps : list of str, optional - Names of maps required for the metric. - ignore_crowding : bool (False) - Ignore the cowding limit. - - Returns - ------- - float - The number of stars above the error limit - """ - cols = [seeing_col, m5_col] units = "N stars" self.crowding_error = crowding_error @@ -157,13 +163,16 @@ def __init__( def run(self, data_slice, slice_point=None): pix_area = hp.nside2pixarea(slice_point["nside"], degrees=True) - # Set mag_vector to the same length as starLumFunc (lower edge of mag bins) + # Set mag_vector to the same length as starLumFunc + # (lower edge of mag bins) mag_vector = slice_point[f"starMapBins_{self.filtername}"][1:] # Pull up density of stars at this point in the sky lum_func = slice_point[f"starLumFunc_{self.filtername}"] - # Calculate the crowding error using the best seeing value (in any filter?) + # Calculate the crowding error using the best seeing value + # (in any filter?) 
crowd_error = _comp_crowd_error(mag_vector, lum_func, seeing=min(data_slice[self.seeing_col])) - # Locate at which point crowding error is greater than user-defined limit + # Locate at which point crowding error is greater than + # user-defined limit above_crowd = np.where(crowd_error >= self.crowding_error)[0] if np.size(above_crowd) == 0: @@ -171,7 +180,8 @@ def run(self, data_slice, slice_point=None): else: crowd_mag = mag_vector[max(above_crowd[0] - 1, 0)] - # Compute the coadded depth, and the mag where that depth hits the error specified + # Compute the coadded depth, and the mag where that depth + # hits the error specified coadded_depth = 1.25 * np.log10(np.sum(10.0 ** (0.8 * data_slice[self.m5_col]))) mag_limit = -2.5 * np.log10(1.0 / (self.crowding_error * (1.09 * 5))) + coadded_depth @@ -195,8 +205,17 @@ def run(self, data_slice, slice_point=None): class CrowdingMagUncertMetric(BaseMetric): - """ - Given a stellar magnitude, calculate the mean uncertainty on the magnitude from crowding. + """Calculate the mean uncertainty in magnitude due to crowding. + + Parameters + ---------- + rmag : `float` + The magnitude of the star to consider. + + Returns + ------- + mag_uncert : `float` + The uncertainty in magnitudes caused by crowding for a star of rmag. """ def __init__( @@ -209,18 +228,6 @@ def __init__( maps=["StellarDensityMap"], **kwargs, ): - """ - Parameters - ---------- - rmag : float - The magnitude of the star to consider. - - Returns - ------- - float - The uncertainty in magnitudes caused by crowding for a star of rmag. - """ - self.filtername = filtername self.seeing_col = seeing_col self.rmag = rmag diff --git a/rubin_sim/maf/metrics/cumulative_metric.py b/rubin_sim/maf/metrics/cumulative_metric.py index a18cfd97f..0d65da2e1 100644 --- a/rubin_sim/maf/metrics/cumulative_metric.py +++ b/rubin_sim/maf/metrics/cumulative_metric.py @@ -11,9 +11,9 @@ class CumulativeMetric(BaseMetric): Parameters ---------- - interp_points : `np.array` (None) - The points to interpolate the cumulative number of observations to. If None, - then the range of the data is used with a stepsize of 1. + interp_points : `np.array`, (N,) or None + The points to interpolate the cumulative number of observations to. + If None, then the range of the data is used with a stepsize of 1. """ def __init__( diff --git a/rubin_sim/maf/metrics/dcr_metric.py b/rubin_sim/maf/metrics/dcr_metric.py index 309f909fd..a77a31c0f 100644 --- a/rubin_sim/maf/metrics/dcr_metric.py +++ b/rubin_sim/maf/metrics/dcr_metric.py @@ -13,8 +13,9 @@ class DcrPrecisionMetric(BaseMetric): Parameters ---------- - atm_err : float - Minimum error in photometry centroids introduced by the atmosphere (arcseconds). Default 0.01. + atm_err : `float` + Minimum error in photometry centroids introduced by the atmosphere + (arcseconds). Default 0.01. """ def __init__( @@ -74,7 +75,8 @@ def run(self, data_slice, slice_point=None): # Now I want to compute the error if I interpolate/extrapolate to +/-1. # function is of form, y=ax. a=y/x. da = dy/x. - # Only strictly true if we know the unshifted position. But this should be a reasonable approx. + # Only strictly true if we know the unshifted position. + # But this should be a reasonable approx. slope_uncerts = position_errors / x_coord slope_uncerts2 = position_errors / x_coord2 @@ -82,10 +84,12 @@ def run(self, data_slice, slice_point=None): np.sum(1.0 / slope_uncerts**2) + np.sum(1.0 / slope_uncerts2**2) ) - # So, this will be the uncertainty in the RA or Dec offset at x= +/- 1. 
A.K.A., the uncertainty in the slope
+        # x= +/- 1. A.K.A., the uncertainty in the slope
 # of the line made by tan(zd)*sin(PA) vs RA offset
 # or the line tan(zd)*cos(PA) vs Dec offset
-        # Assuming we know the unshfted position of the object (or there's little covariance if we are fitting for both)
+        # Assuming we know the unshifted position of the object
+        # (or there's little covariance if we are fitting for both)
 result = total_slope_uncert
 return result
diff --git a/rubin_sim/maf/metrics/exgal_m5.py b/rubin_sim/maf/metrics/exgal_m5.py
index 37cd4ee51..d34af24ef 100644
--- a/rubin_sim/maf/metrics/exgal_m5.py
+++ b/rubin_sim/maf/metrics/exgal_m5.py
@@ -18,31 +18,40 @@ class ExgalM5(BaseMetric):
 Column name for five sigma depth. Default 'fiveSigmaDepth'.
 unit : `str`, optional
 Label for units. Default 'mag'.
+
+    Returns
+    -------
+    coadd_m5 : `float`
+        Coadded m5 value, corrected for galactic dust extinction.
 """
 def __init__(
 self, m5_col="fiveSigmaDepth", metric_name="ExgalM5", units="mag", filter_col="filter", **kwargs
 ):
-        # Set the name for the dust map to use. This is gathered into the MetricBundle.
+        # Set the name for the dust map to use.
+        # This is gathered into the MetricBundle.
 maps = ["DustMap"]
 self.m5_col = m5_col
 self.filter_col = filter_col
 super().__init__(
 col=[self.m5_col, self.filter_col], maps=maps, metric_name=metric_name, units=units, **kwargs
 )
-        # Set the default wavelength limits for the lsst filters. These are approximately correct.
+        # Set the default wavelength limits for the lsst filters.
+        # These are approximately correct.
 dust_properties = DustValues()
 self.ax1 = dust_properties.ax1
-        # We will call Coaddm5Metric to calculate the coadded depth. Set it up here.
+        # We will call Coaddm5Metric to calculate the coadded depth.
+        # Set it up here.
 self.coaddm5_metric = Coaddm5Metric(m5_col=m5_col)
 def run(self, data_slice, slice_point):
-        """
-        Compute the co-added m5 depth and then apply dust extinction to that magnitude.
+        """Compute the co-added m5 depth and then apply
+        dust extinction to that magnitude.
 """
 m5 = self.coaddm5_metric.run(data_slice)
 if m5 == self.coaddm5_metric.badval:
 return self.badval
-        # Total dust extinction along this line of sight. Correct default A to this EBV value.
+        # Total dust extinction along this line of sight.
+        # Correct default A to this EBV value.
 a_x = self.ax1[data_slice[self.filter_col][0]] * slice_point["ebv"]
 return m5 - a_x
diff --git a/rubin_sim/maf/metrics/galactic_plane_metrics.py b/rubin_sim/maf/metrics/galactic_plane_metrics.py
index f9c6da0f9..02238f7d5 100644
--- a/rubin_sim/maf/metrics/galactic_plane_metrics.py
+++ b/rubin_sim/maf/metrics/galactic_plane_metrics.py
@@ -13,19 +13,24 @@
 from .base_metric import BaseMetric
-# These are a suite of metrics aimed at evaluating high-level quantities regarding galactic plane
-# coverage. The metrics here evaluate the coverage (just number of visits and exposure time per filter)
+# These are a suite of metrics aimed at evaluating high-level
+# quantities regarding galactic plane coverage.
+# The metrics here evaluate the coverage
+# (just number of visits and exposure time per filter)
 # in relation to the desired coverage from the galactic plane priority map.
-# There is a related metric in transientTimeSampling which evaluates the cadence weighted by this same map.
+# There is a related metric in transientTimeSampling which
+# evaluates the cadence weighted by this same map.
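(The galplane_nvisits_thresholds hunk just below documents a simple counting estimate; as a standalone, hedged illustration of that argument, assuming the ~6.5-month observing season and paired nightly visits its docstring describes, not the library code itself:)

import numpy as np

tau_obs = np.array([2.0, 5.0, 11.0, 20.0, 46.5, 73.0])  # days between visits
nyears = 10
season_fraction = 6.5 / 12.0  # assumed observable fraction of each year
nights_on_sky = 365.25 * nyears * season_fraction
nvisits_thresholds = 2 * nights_on_sky / tau_obs  # visits arrive in pairs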
TAU_OBS = np.array([2.0, 5.0, 11.0, 20.0, 46.5, 73.0])
 def galplane_nvisits_thresholds(tau_obs, nyears=10):
-    """ "Return estimated nvisits required to well-sample lightcurves that need sampling every tau_obs (days).
+    """Return estimated nvisits required to well-sample lightcurves
+    that need sampling every tau_obs (days).
-    This does a very basic estimate, just counting how many visits you would have if you distributed them
-    at tau_obs intervals for a period of nyears, assuming a season length of 6.5 years and that visits in
+    This does a very basic estimate, just counting how many visits you
+    would have if you distributed them at tau_obs intervals for a period
+    of nyears, assuming a season length of 6.5 months and that visits in
 each night are in pairs.
 Parameters
@@ -38,7 +43,8 @@
 Returns
 -------
 n_visits_thresholds : `np.ndarray`
-        Estimated number of visits required to well sample lightcurves which require sampling on tau_obs
+        Estimated number of visits required to well sample lightcurves
+        which require sampling on tau_obs
 """
 # How many nights in the survey
 nnights_total = 365.25 * nyears
@@ -50,7 +56,8 @@
 def galplane_priority_map_thresholds(science_map):
-    """Return minimum threshold for priority maps, when considering filter balance.
+    """Return minimum threshold for priority maps,
+    when considering filter balance.
 Parameters
 ----------
@@ -84,23 +91,26 @@ def _nvisits_cut(obj, metricval):
 class GalPlaneFootprintMetric(BaseMetric):
-    """Evaluate the survey overlap with desired regions in the Galactic Plane
-    and Magellanic Clouds, by referencing the pre-computed priority maps provided.
+    """Evaluate the survey overlap with desired regions in the
+    Galactic Plane and Magellanic Clouds, by referencing the
+    pre-computed priority maps provided.
 These priority maps are keyed by science area (science_map)
 and per filter. The returned metric values are summed over all filters.
 Parameters
 ----------
 science_map : `str`
-        Name of the priority footprint map key to use from the column headers contained in the
-        priority_GalPlane_footprint_map_data tables.
+        Name of the priority footprint map key to use from the column
+        headers contained in the priority_GalPlane_footprint_map_data tables.
 tau_obs : `np.ndarray` or `list` of `float`, opt
-        Timescales of minimum-required observations intervals for various classes of time variability.
-        Default (None), uses TAU_OBS. In general, this should be left as the default and consistent
-        across all galactic-plane oriented metrics.
+        Timescales of minimum-required observations intervals for
+        various classes of time variability.
+        Default (None), uses TAU_OBS. In general, this should be left as
+        the default and consistent across all galactic-plane oriented metrics.
 mag_cuts : `dict` of `float`, opt
 Magnitudes to use as cutoffs for individual image depths.
-        Default None uses a default set of values which correspond roughly to the 50th percentile.
+        Default None uses a default set of values which correspond
+        roughly to the 50th percentile.
 filter_col : `str`, opt
 Name of the filter column. Default 'filter'.
 m5_col : `str`, opt
@@ -173,11 +183,14 @@ def __init__(
 self.reduce_order[r_name] = i + 2
 def run(self, data_slice, slice_point):
-        """Calculate the number of observations that meet the mag_cut values at each slice_point.
-        Also calculate the number of observations * the priority map summed over all filter.
-        Return both of these values as a dictionary.
+        """Calculate the number of observations that meet the mag_cut values
+        at each slice_point.
+
+        Also calculate the number of observations * the priority map summed
+        over all filters. Return both of these values as a dictionary.
 """
-        # Check if we want to evaluate this part of the sky, or if the weight is below threshold.
+        # Check if we want to evaluate this part of the sky,
+        # or if the weight is below threshold.
 mapkey = gp_priority_map_components_to_keys("sum", self.science_map)
 priority = slice_point[mapkey]
 if priority <= self.priority_map_threshold:
@@ -206,19 +219,23 @@ def reduce_n_obs_priority(self, metricval):
 class GalPlaneTimePerFilterMetric(BaseMetric):
-    """Evaluate the fraction of exposure time spent in each filter as a fraction of the
-    total exposure time dedicated to that healpix in the weighted galactic plane priority maps.
+    """Evaluate the fraction of exposure time spent in each filter as a
+    fraction of the total exposure time dedicated to that healpix in the
+    weighted galactic plane priority maps.
 Parameters
 ----------
 scienceMap : `str`
-        Name of the priority footprint map key to use from the column headers contained in the
+        Name of the priority footprint map key to use from the column
+        headers contained in the
 priority_GalPlane_footprint_map_data tables.
 magCuts : `dict` of `float`, opt
 Magnitudes to use as cutoffs for individual image depths.
-        Default None uses a default set of values which correspond roughly to the 50th percentile.
+        Default None uses a default set of values which correspond
+        roughly to the 50th percentile.
 mjd_col : `str`, opt
-        Name of the observation start MJD column. Default 'observationStartMJD'.
+        Name of the observation start MJD column.
+        Default 'observationStartMJD'.
 exp_time_col : `str`, opt
 Name of the exposure time column. Default 'visitExposureTime'.
 filter_col : `str`, opt
@@ -282,14 +299,17 @@ def __init__(
 def run(self, data_slice, slice_point):
 """Calculate the ratio of the actual on-sky exposure time per filter
-        compared to the ideal on-sky exposure time per filter at this point on the sky across all filters.
+        compared to the ideal on-sky exposure time per filter at this point
+        on the sky across all filters.
 """
-        # Check if we want to evaluate this part of the sky, or if the weight is below threshold.
+        # Check if we want to evaluate this part of the sky,
+        # or if the weight is below threshold.
 weight_all_filters = slice_point[gp_priority_map_components_to_keys("sum", self.science_map)]
 if weight_all_filters <= self.priority_map_threshold:
 return self.badval
-        # Calculate the ideal weighting per filter compared to all filters at this point in the sky
+        # Calculate the ideal weighting per filter compared to all
+        # filters at this point in the sky
 relative_filter_weight = {}
 for f in self.filterlist:
 mapkey = gp_priority_map_components_to_keys(f, self.science_map)
@@ -313,19 +333,21 @@ def run(self, data_slice, slice_point):
 # provided, and additional data in other filters is usually welcome
 exp_time_per_filter[f] = data_slice[self.exp_time_col][match].sum()
-        # Calculate the time on-sky in each filter that overlaps this point, and meets mag_cuts
+        # Calculate the time on-sky in each filter that overlaps this point,
+        # and meets mag_cuts
 total_expt_mag_cut = 0
 for f in self.filterlist:
 total_expt_mag_cut += exp_time_per_filter[f].sum()
-        # normalize by the relative filter weight. Ideally metric results are close to 1.
+        # normalize by the relative filter weight.
+        # Ideally metric results are close to 1.
 normalized_exp_time = {}
 for f in self.filterlist:
 if total_expt_mag_cut == 0:
 normalized_exp_time[f] = 0
 else:
-                # If no exposures are expected in this filter for this location,
-                # thais metric returns the mask val for this filter only.
+                # If no exposures are expected in this filter for this
+                # location, the metric returns the mask val for this filter only.
 if relative_filter_weight[f] > 0:
 normalized_exp_time[f] = (
 exp_time_per_filter[f] / total_expt_mag_cut / relative_filter_weight[f]
diff --git a/rubin_sim/maf/metrics/galplane_time_sampling_metrics.py b/rubin_sim/maf/metrics/galplane_time_sampling_metrics.py
index f5578efb5..f366fda50 100644
--- a/rubin_sim/maf/metrics/galplane_time_sampling_metrics.py
+++ b/rubin_sim/maf/metrics/galplane_time_sampling_metrics.py
@@ -1,8 +1,8 @@
-################################################################################################
+###################################################################
 # Metric to evaluate the transientTimeSamplingMetric
 #
 # Author - Rachel Street: rstreet@lco.global
-################################################################################################
+###################################################################
 __all__ = (
 "calc_interval_decay",
 "GalPlaneVisitIntervalsTimescaleMetric",
@@ -48,17 +48,19 @@ class GalPlaneVisitIntervalsTimescaleMetric(BaseMetric):
 Parameters
 ----------
 science_map : `str`
-        Name of the priority footprint map key to use from the column headers contained in the
-        priority_GalPlane_footprint_map_data tables.
+        Name of the priority footprint map key to use from the column
+        headers contained in the priority_GalPlane_footprint_map_data tables.
 tau_obs : `np.ndarray` or `list` of `float`, opt
-        Timescales of minimum-required observations intervals for various classes of time variability.
-        Default (None), uses TAU_OBS. In general, this should be left as the default and consistent
-        across all galactic-plane oriented metrics.
+        Timescales of minimum-required observations intervals for various
+        classes of time variability.
+        Default (None), uses TAU_OBS. In general, this should be left as the
+        default and consistent across all galactic-plane oriented metrics.
 mag_limit : `float`, opt
 Magnitude limit to use as a cutoff for various observations.
 Default 22.0.
 mjd_col : `str`, opt
-        The name of the observation start MJD column. Default 'observationStartMJD'.
+        The name of the observation start MJD column.
+        Default 'observationStartMJD'.
 m5_col : `str', opt
 The name of the five sigma depth column. Default 'fiveSigmaDepth'.
 """
@@ -80,7 +82,8 @@ def __init__(
 self.tau_obs = tau_obs
 else:
 self.tau_obs = TAU_OBS
-        # Create reduce functions for the class that are return the metric for each value in tau_obs
+        # Create reduce functions for the class that return the metric
+        # for each value in tau_obs
 self.mag_limit = mag_limit
 self.mjd_col = mjd_col
@@ -105,7 +108,8 @@ def __init__(
 self.reduce_order[f"reduceTau_{tau:.1f}".replace(".", "_").replace("reduce", "")] = i
 def run(self, data_slice, slice_point=None):
-        # Check if we want to evaluate this part of the sky, or if the weight is below threshold.
+        # Check if we want to evaluate this part of the sky,
+        # or if the weight is below threshold.
if ( slice_point[gp_priority_map_components_to_keys("sum", self.science_map)] <= self.priority_map_threshold @@ -114,14 +118,16 @@ def run(self, data_slice, slice_point=None): # Select observations in the time sequence that fulfill the # S/N requirements: match = np.where(data_slice[self.m5_col] >= self.mag_limit)[0] - # We need at least two visits which match these requirements to calculate visit gaps + # We need at least two visits which match these requirements + # to calculate visit gaps if len(match) < 2: return self.badval # Find the time gaps between visits (in any filter) times = data_slice[self.mjd_col][match] times.sort() delta_tobs = np.diff(times) - # Compare the time gap distribution to the time gap required to characterize variability + # Compare the time gap distribution to the time gap required + # to characterize variability metric_data = {} for tau in self.tau_obs: # Normalize @@ -130,18 +136,20 @@ def run(self, data_slice, slice_point=None): class GalPlaneSeasonGapsTimescaleMetric(BaseMetric): - """Metric to evaluate the gap between sequential seasonal gaps in + """Evaluate the gap between sequential seasonal gaps in observations in a lightcurve relative to the scientifically desired sampling interval. Parameters ---------- science_map : `str` - Name of the priority footprint map key to use from the column headers contained in the + Name of the priority footprint map key to use from the column + headers contained in the priority_GalPlane_footprint_map_data tables. tau_var : `np.ndarray` or `list` of `float`, opt Timescales of variability for various classes of time variability. - Default (None), uses TAU_OBS * 5. In general, this should be left as the default and consistent + Default (None), uses TAU_OBS * 5. In general, this should be left + as the default and consistent across all galactic-plane oriented metrics. mag_limit : `float`, opt Magnitude limit to use as a cutoff for various observations. @@ -150,7 +158,8 @@ class GalPlaneSeasonGapsTimescaleMetric(BaseMetric): The typical season gap expected for a galactic plane field in days. The default, 145 days, is typical for a bulge field. mjd_col : `str`, opt - The name of the observation start MJD column. Default 'observationStartMJD'. + The name of the observation start MJD column. + Default 'observationStartMJD'. m5_col : `str', opt The name of the five sigma depth column. Default 'fiveSigmaDepth'. 
""" @@ -168,14 +177,16 @@ def __init__( self.science_map = science_map self.priority_map_threshold = galplane_priority_map_thresholds(self.science_map) # tau_obs is an array of minimum-required observation intervals for - # four categories of time variability; tau_var is the related timescale for the variability - # (tau_var is approximately 5 * tau_obs, in general) + # four categories of time variability; tau_var is the related timescale + # for the variability (tau_var is approximately 5*tau_obs, in general) if tau_var is not None: self.tau_var = tau_var else: self.tau_var = TAU_OBS * 5 - ### NOTE: I would recommend dropping tau_var 10 and 25 from this analysis unless the metric is changed - ### these intervals are so short they will *always* be dropped during the season gap + ### NOTE: I would recommend dropping tau_var 10 and 25 from this + # analysis unless the metric is changed + # these intervals are so short they will *always* be dropped + # during the season gap self.mag_limit = mag_limit self.expected_season_gap = expected_season_gap self.mjd_col = mjd_col @@ -194,7 +205,8 @@ def __init__( self.reduce_order[f"reduce_Tau_{tau:.1f}".replace(".", "_").replace("reduce", "")] = i def run(self, data_slice, slice_point): - # Check if we want to evaluate this part of the sky, or if the weight is below threshold. + # Check if we want to evaluate this part of the sky, + # or if the weight is below threshold. if ( slice_point[gp_priority_map_components_to_keys("sum", self.science_map)] <= self.priority_map_threshold @@ -204,17 +216,20 @@ def run(self, data_slice, slice_point): times = data_slice[self.mjd_col] times.sort() # data = np.sort(data_slice[self.mjd_col], order=self.mjd_col) - # SlicePoints ra/dec are always in radians - convert to degrees to calculate season + # SlicePoints ra/dec are always in radians - + # convert to degrees to calculate season seasons = calc_season(np.degrees(slice_point["ra"]), times) first_of_season, last_of_season = find_season_edges(seasons) - # season_lengths = times[last_of_season] - times[first_of_season] # would this match interval calc better? + # season_lengths = times[last_of_season] - times[first_of_season] + # would this match interval calc better? season_gaps = times[first_of_season][1:] - times[last_of_season][:-1] if len(season_gaps) == 0: return self.badval metric_data = {} for i, tau in enumerate(self.tau_var): metric_data[tau] = calc_interval_decay(season_gaps, tau) - # if the season gap is shorter than the expected season gap, count this as 'good' + # if the season gap is shorter than the expected season gap, + # count this as 'good' good_season_gaps = np.where(season_gaps <= self.expected_season_gap) metric_data[tau][good_season_gaps] = 1 metric_data[tau] = metric_data[tau].sum() / len(season_gaps) diff --git a/rubin_sim/maf/metrics/hourglass_metric.py b/rubin_sim/maf/metrics/hourglass_metric.py index b8aeaa57f..dd6b947ea 100644 --- a/rubin_sim/maf/metrics/hourglass_metric.py +++ b/rubin_sim/maf/metrics/hourglass_metric.py @@ -15,8 +15,10 @@ def nearest_val(A, val): class HourglassMetric(BaseMetric): - """Plot the filters used as a function of time. Must be used with the Hourglass Slicer. - Will totally fail in the arctic circle.""" + """Plot the filters used as a function of time. + Must be used with the Hourglass Slicer. + Will totally fail in the arctic circle. 
+ """ def __init__( self, @@ -68,15 +70,18 @@ def run(self, data_slice, slice_point=None): perfilter["mjd"] = data_slice[self.mjd_col][good] perfilter["filter"] = data_slice[self.filter_col][good] - # brute force compute midnight times for all days between start and enc of data_slice + # brute force compute midnight times for all days between + # start and enc of data_slice times = Time(mjds, format="mjd") - # let's just find the midnight before and after each of the pre_night MJD values + # let's just find the midnight before and after each of the + # pre_night MJD values m_after = self.observer.midnight(times, "next") m_before = self.observer.midnight(times, "previous") midnights = np.unique(np.concatenate([m_before.mjd, m_after.mjd])) # calculating midnight can return nans? That seems bad. midnights = midnights[np.isfinite(midnights)] - # chop off any repeats. Need to round because observe.midnight values are not repeatable + # chop off any repeats. Need to round because observe.midnight + # values are not repeatable m10 = np.round(midnights * 10) _temp, indx = np.unique(m10, return_index=True) midnights = midnights[indx] @@ -105,12 +110,8 @@ def run(self, data_slice, slice_point=None): perfilter["midnight"] = midnights[indx] temp_indx = np.where(d1 < d2) perfilter["midnight"][temp_indx] = midnights[indx - 1][temp_indx] - try: - mtime = Time(pernight["midnight"], format="mjd") - except: - import pdb + mtime = Time(pernight["midnight"], format="mjd") - pdb.set_trace() pernight["twi12_rise"] = self.observer.twilight_morning_nautical(mtime, which="next").mjd pernight["twi12_set"] = self.observer.twilight_evening_nautical(mtime, which="previous").mjd diff --git a/rubin_sim/maf/metrics/long_gap_agn_metric.py b/rubin_sim/maf/metrics/long_gap_agn_metric.py deleted file mode 100644 index 17175a595..000000000 --- a/rubin_sim/maf/metrics/long_gap_agn_metric.py +++ /dev/null @@ -1,45 +0,0 @@ -__all__ = ("LongGapAGNMetric",) - -import numpy as np - -from .base_metric import BaseMetric - - -class LongGapAGNMetric(BaseMetric): - """max delta-t and average of the top-10 longest gaps.""" - - def __init__( - self, - metric_name="longGapAGNMetric", - mjdcol="observationStartMJD", - units="days", - xgaps=10, - badval=-666, - **kwargs, - ): - """Instantiate metric. - mjdcol = column name for exposure time dates - """ - cols = [mjdcol] - super(LongGapAGNMetric, self).__init__(cols, metric_name, units=units, **kwargs) - self.badval = badval - self.mjdcol = mjdcol - self.xgaps = xgaps - self.units = units - - def run(self, data_slice, slice_point=None): - metricval = np.diff(data_slice[self.mjdcol]) - return metricval - - def reduce_max_gap(self, metricval): - if metricval.size > 0: - result = np.max(metricval) - else: - result = self.badval - return result - - def reduce_average_longest_x_gaps(self, metricval): - if np.size(metricval) - self.xgaps > 0: - return np.average(np.sort(metricval)[np.size(metricval) - self.xgaps :]) - else: - return self.badval diff --git a/rubin_sim/maf/metrics/mo_metrics.py b/rubin_sim/maf/metrics/mo_metrics.py index 8d574e283..4b56c59b4 100644 --- a/rubin_sim/maf/metrics/mo_metrics.py +++ b/rubin_sim/maf/metrics/mo_metrics.py @@ -40,7 +40,8 @@ def _set_vis(sso_obs, snr_limit, snr_col, vis_col): class BaseMoMetric(BaseMetric): """Base class for the moving object metrics. - Intended to be used with the Moving Object Slicer.""" + Intended to be used with the Moving Object Slicer. 
+ """ def __init__( self, @@ -67,7 +68,8 @@ def __init__( self.name = metric_name if self.name is None: self.name = self.__class__.__name__.replace("Metric", "", 1) - # Set badval and units, leave space for 'comment' (tied to display_dict). + # Set badval and units, leave space for 'comment' + # (tied to display_dict). self.badval = badval self.units = units self.comment = comment @@ -115,16 +117,17 @@ def run(self, sso_obs, orb, hval): Parameters ---------- - sso_obs: np.ndarray + sso_obs : `np.ndarray`, (N,) The input data to the metric (same as the parent metric). - orb: np.ndarray - The information about the orbit for which the metric is being calculated. - hval : float + orb : `np.ndarray`, (N,) + The information about the orbit for which the metric is + being calculated. + hval : `float` The H value for which the metric is being calculated. Returns ------- - float or np.ndarray or dict + metric_val : `float` or `np.ndarray` or `dict` """ raise NotImplementedError @@ -134,9 +137,10 @@ class BaseChildMetric(BaseMoMetric): Parameters ---------- - parentDiscoveryMetric: BaseMoMetric - The 'parent' metric which generated the metric data used to calculate this 'child' metric. - badval: float, optional + parentDiscoveryMetric : `~BaseMoMetric` + The 'parent' metric which generated the metric data used + calculate this 'child' metric. + badval : `float`, optional Value to return when metric cannot be calculated. """ @@ -154,18 +158,19 @@ def run(self, sso_obs, orb, hval, metric_values): Parameters ---------- - sso_obs: np.ndarray + sso_obs : `np.ndarray`, (N,) The input data to the metric (same as the parent metric). - orb: np.ndarray - The information about the orbit for which the metric is being calculated. - hval : float + orb : `np.ndarray`, (N,) + The information about the orbit for which the metric is + being calculated. + hval : `float` The H value for which the metric is being calculated. - metric_values : dict or np.ndarray + metric_values : `dict` or `np.ndarray`, (N,) The return value from the parent metric. Returns ------- - float + metric_val : `float` """ raise NotImplementedError @@ -173,13 +178,16 @@ def run(self, sso_obs, orb, hval, metric_values): class NObsMetric(BaseMoMetric): """ Count the total number of observations where an SSobject was 'visible'. + + Parameters + ---------- + snr_limit : `float` or None + If the snr_limit is None, detection of the object in a visit is + determined using the _calcVis method (completeness calculation). + If not None, the snr is calculated and used as a flat cutoff instead. """ def __init__(self, snr_limit=None, **kwargs): - """ - @ snr_limit .. if snr_limit is None, this uses the _calcVis method/completeness - if snr_limit is not None, this uses that value as a cutoff instead. - """ super().__init__(**kwargs) self.snr_limit = snr_limit @@ -193,9 +201,8 @@ def run(self, sso_obs, orb, hval): class NObsNoSinglesMetric(BaseMoMetric): - """ - Count the number of observations for an SSobject, without singles. - Don't include any observations where it was a single observation on a night. + """Count the number of observations for an SSobject, without singles. + Don't include observations where it was a single observation on a night. 
""" def __init__(self, snr_limit=None, **kwargs): @@ -217,10 +224,6 @@ class NNightsMetric(BaseMoMetric): """Count the number of distinct nights an SSobject is observed.""" def __init__(self, snr_limit=None, **kwargs): - """ - @ snr_limit : if SNRlimit is None, this uses _calcVis method/completeness - else if snr_limit is not None, it uses that value as a cutoff. - """ super().__init__(**kwargs) self.snr_limit = snr_limit @@ -233,7 +236,9 @@ def run(self, sso_obs, orb, hval): class ObsArcMetric(BaseMoMetric): - """Calculate the difference between the first and last observation of an SSobject.""" + """Calculate the difference in time between the first and last observation + of an SSobject. + """ def __init__(self, snr_limit=None, **kwargs): super().__init__(**kwargs) @@ -252,25 +257,29 @@ class DiscoveryMetric(BaseMoMetric): Parameters ---------- - n_obs_per_night : int, optional + n_obs_per_night : `int`, optional Number of observations required within a single night. Default 2. - t_min : float, optional + t_min : `float`, optional Minimum time span between observations in a single night, in days. Default 5 minutes (5/60/24). - t_max : float, optional + t_max : `float`, optional Maximum time span between observations in a single night, in days. Default 90 minutes. - n_nights_per_window : int, optional - Number of nights required with observations, within the track window. Default 3. - t_window : int, optional + n_nights_per_window : `int`, optional + Number of nights required with observations, within the track window. + Default 3. + t_window : `int`, optional Number of nights included in the track window. Default 15. - snr_limit : None or float, optional - SNR limit to use for observations. If snr_limit is None, (default), then it uses - the completeness calculation added to the 'vis' column (probabilistic visibility, - based on 5-sigma limit). If snr_limit is not None, it uses this SNR value as a cutoff. - metricName : str, optional + snr_limit : None or `float`, optional + SNR limit to use for observations. + If snr_limit is None, (default), then it uses + the completeness calculation added to the 'vis' column + (probabilistic visibility, based on 5-sigma limit). + If snr_limit is not None, it uses this SNR value as a cutoff. + metricName : `str`, optional The metric name to use. - Default will be to construct Discovery_nObsPerNightxnNightsPerWindowintWindow. + Default will be to construct + Discovery_nObsPerNightxnNightsPerWindowintWindow. """ def __init__( @@ -356,8 +365,9 @@ def run(self, sso_obs, orb, hval): tidx = np.where((dtimes >= self.t_min) & (dtimes <= self.t_max))[0] if len(tidx) > 0: good[c] = 1 - # 'good' provides mask for observations which could count as 'good to make tracklets' - # against sso_obs[vis_sort][n_idx_many]. Now identify tracklets which can make tracks. + # 'good' provides mask for observations which could count as + # 'good to make tracklets' against sso_obs[vis_sort][n_idx_many]. + # Now identify tracklets which can make tracks. good_idx = vis_sort[n_idx_many][good == 1] good_idx_ends = vis_sort[n_idx_many_end][good == 1] # print 'good tracklets', nights[good_idx] @@ -367,7 +377,8 @@ def run(self, sso_obs, orb, hval): np.roll(sso_obs[self.night_col][vis][good_idx], 1 - self.n_nights_per_window) - sso_obs[self.night_col][vis][good_idx] ) - # Identify the index in sso_obs[vis][good_idx] (sorted by mjd) where the discovery opportunity starts. + # Identify the index in sso_obs[vis][good_idx] (sorted by mjd) + # where the discovery opportunity starts. 
start_idxs = np.where((delta_nights >= 0) & (delta_nights <= self.t_window))[0] # Identify the index where the discovery opportunity ends. end_idxs = np.zeros(len(start_idxs), dtype="int") @@ -391,15 +402,13 @@ def run(self, sso_obs, orb, hval): class DiscoveryNChancesMetric(BaseChildMetric): """Calculate total number of discovery opportunities for an SSobject. - Retirms total number of discovery opportunities. + Returns total number of discovery opportunities. Child metric to be used with the Discovery Metric. """ def __init__( self, parent_discovery_metric, - # night_start=None, - # night_end=None, badval=0, **kwargs, ): @@ -407,7 +416,8 @@ def __init__( self.night_start = None # night_start self.night_end = None # night_end self.snr_limit = parent_discovery_metric.snr_limit - # Update the metric name to use the night_start/night_end values, if an overriding name is not given. + # Update the metric name to use the night_start/night_end values, + # if an overriding name is not given. if "metric_name" not in kwargs: if self.night_start is not None: self.name = self.name + "_n%d" % (self.night_start) @@ -415,34 +425,16 @@ def __init__( self.name = self.name + "_n%d" % (self.night_end) def run(self, sso_obs, orb, hval, metric_values): - """Return the number of different discovery chances we had for each object/H combination.""" - return metric_values["n_chances"] - """ - vis = _set_vis(sso_obs, self.snr_limit, self.snr_col, self.vis_col) - if len(vis) == 0: - return self.badval - if self.night_start is None and self.night_end is None: - return len(metric_values["start"]) - # Otherwise, we have to sort out what night the discovery chances happened on. - vis_sort = np.argsort(sso_obs[self.mjd_col][vis]) - nights = sso_obs[self.night_col][vis][vis_sort] - start_nights = nights[metric_values["start"]] - end_nights = nights[metric_values["end"]] - if self.night_end is None and self.night_start is not None: - valid = np.where(start_nights >= self.night_start)[0] - elif self.night_start is None and self.night_end is not None: - valid = np.where(end_nights <= self.night_end)[0] - else: - # And we only end up here if both were not None. - valid = np.where( - (start_nights >= self.night_start) & (end_nights <= self.night_end) - )[0] - return len(valid) + """Return the number of different discovery chances we + had for each object/H combination. """ + return metric_values["n_chances"] class DiscoveryNObsMetric(BaseChildMetric): - """Calculates the number of observations in the first discovery track of an SSobject.""" + """Calculates the number of observations in the first discovery + track of an SSobject. + """ def __init__(self, parent_discovery_metric, badval=0, **kwargs): super().__init__(parent_discovery_metric, badval=badval, **kwargs) @@ -527,7 +519,8 @@ def run(self, sso_obs, orb, hval, metric_values): class DiscoveryEclonlatMetric(BaseChildMetric): - """Returns the ecliptic lon/lat and solar elong of the first discovery track of an SSobject.""" + """Returns the ecliptic lon/lat and solar elong of the first discovery + track of an SSobject.""" def __init__(self, parent_discovery_metric, badval=None, **kwargs): super().__init__(parent_discovery_metric, badval=badval, **kwargs) @@ -574,7 +567,8 @@ class ActivityOverTimeMetric(BaseMoMetric): Counts the time periods where we would have a chance to detect activity on a moving object. 
- Splits observations into time periods set by 'window', then looks for observations within each window, + Splits observations into time periods set by 'window', + then looks for observations within each window, and reports what fraction of the total windows receive 'nObs' visits. """ @@ -590,7 +584,8 @@ def __init__(self, window, snr_limit=5, survey_years=10.0, metric_name=None, **k self.units = "%.1f Day Windows" % (self.window) def run(self, sso_obs, orb, hval): - # For cometary activity, expect activity at the same point in its orbit at the same time, mostly + # For cometary activity, expect activity at the same point in its + # orbit at the same time, mostly # For collisions, expect activity at random times vis = _set_vis(sso_obs, self.snr_limit, self.snr_col, self.vis_col) if len(vis) == 0: @@ -601,7 +596,8 @@ def run(self, sso_obs, orb, hval): class ActivityOverPeriodMetric(BaseMoMetric): - """Count fraction of object period we could identify activity for an SSobject. + """Count fraction of object period we could identify activity + for an SSobject. Count the fraction of the orbit (when split into n_bins) that receive observations, in order to have a chance to detect activity. @@ -638,7 +634,8 @@ def __init__( self.units = "%.1f deg" % (np.degrees(self.bin_size)) def run(self, sso_obs, orb, hval): - # For cometary activity, expect activity at the same point in its orbit at the same time, mostly + # For cometary activity, expect activity at the same point in its + # orbit at the same time, mostly # For collisions, expect activity at random times if self.a_col in orb.keys(): a = orb[self.a_col] @@ -667,15 +664,21 @@ def run(self, sso_obs, orb, hval): class MagicDiscoveryMetric(BaseMoMetric): - """Count the number of nights with discovery opportunities with very good software for an SSobject.""" + """Count the number of nights with discovery opportunities + with very good software for an SSobject. + + Parameters + ---------- + n_obs : `int`, opt + Total number of observations required for discovery. + t_window : `float`, opt + The timespan of the discovery window (days). + snr_limit : `float` or None + If None, uses the probabilistic detection likelihood. + If float, uses the SNR value as a flat cutoff value. + """ def __init__(self, n_obs=6, t_window=60, snr_limit=None, **kwargs): - """ - @ n_obs = the total number of observations required for 'discovery' - @ t_window = the timespan of the discovery window. - @ snr_limit .. if snr_limit is None then uses 'completeness' calculation, - .. if snr_limit is not None, then uses this value as a cutoff. - """ super().__init__(**kwargs) self.snr_limit = snr_limit self.n_obs = n_obs @@ -683,7 +686,6 @@ def __init__(self, n_obs=6, t_window=60, snr_limit=None, **kwargs): self.badval = 0 def run(self, sso_obs, orb, hval): - """SsoObs = Dataframe, orb=Dataframe, hval=single number.""" # Calculate visibility for this orbit at this H. vis = _set_vis(sso_obs, self.snr_limit, self.snr_col, self.vis_col) if len(vis) < self.n_obs: @@ -699,16 +701,22 @@ def run(self, sso_obs, orb, hval): class HighVelocityMetric(BaseMoMetric): """Count number of times an SSobject appears trailed. - Count the number of times an asteroid is observed with a velocity high enough to make it appear - trailed by a factor of (psf_factor)*PSF - i.e. velocity >= psf_factor * seeing / visitExpTime. + Count the number of times an asteroid is observed with a velocity + high enough to make it appear trailed by a factor of (psf_factor)*PSF - + i.e. 
velocity >= psf_factor * seeing / visitExpTime.
     Simply counts the total number of observations with high velocity.
+
+    Parameters
+    ----------
+    psf_factor : `float`, opt
+        The factor to multiply the seeing/VisitExpTime by to compare against
+        velocity.
+    snr_limit : `float` or None
+        If None, uses the probabilistic detection likelihood.
+        If float, uses the SNR value as a flat cutoff value.
     """

     def __init__(self, psf_factor=2.0, snr_limit=None, velocity_col="velocity", **kwargs):
-        """
-        @ psf_factor = factor to multiply seeing/visitExpTime by
-        (velocity(deg/day) >= 24*psf_factor*seeing(")/visitExptime(s))
-        """
         super().__init__(**kwargs)
         self.velocity_col = velocity_col
         self.snr_limit = snr_limit
@@ -727,29 +735,35 @@ def run(self, sso_obs, orb, hval):


 class HighVelocityNightsMetric(BaseMoMetric):
-    """Count the number of discovery opportunities (via trailing) for an SSobject.
+    """Count the number of discovery opportunities (via trailing) for an
+    SSobject.

-    Determine the first time an asteroid is observed is observed with a velocity high enough to make
-    it appear trailed by a factor of psf_factor*PSF with n_obs_per_night observations within a given night.
+    Determine the first time an asteroid is observed with a
+    velocity high enough to make it appear trailed by a factor of
+    psf_factor*PSF with n_obs_per_night observations within a given night.

     Parameters
     ----------
-    psf_factor: float, optional
-        Object velocity (deg/day) must be >= 24 * psf_factor * seeingGeom (") / visitExpTime (s).
+    psf_factor : `float`, optional
+        Object velocity (deg/day) must be
+        >= 24 * psf_factor * seeingGeom (") / visitExpTime (s).
         Default is 2 (i.e. object trailed over 2 psf's).
-    n_obs_per_night: int, optional
+    n_obs_per_night : `int`, optional
         Number of observations per night required. Default 2.
-    snr_limit: float or None
-        If snr_limit is set as a float, then requires object to be above snr_limit SNR in the image.
-        If snr_limit is None, this uses the probabilistic 'visibility' calculated by the vis stacker,
-        which means SNR ~ 5.  Default is None.
-    velocity_col: str, optional
-        Name of the velocity column in the obs file. Default 'velocity'. (note this is deg/day).
+    snr_limit : `float` or None
+        If snr_limit is set as a float, then requires object to be above
+        snr_limit SNR in the image.
+        If snr_limit is None, this uses the probabilistic 'visibility'
+        calculated by the vis stacker, which means SNR ~ 5.
+        Default is None.
+    velocity_col : `str`, optional
+        Name of the velocity column in the obs file.
+        Default 'velocity'. (note this is deg/day).

     Returns
     -------
-    float
-        The time of the first detection where the conditions are satisifed.
+    time : `float`
+        The time of the first detection where the conditions are satisfied.
     """

     def __init__(self, psf_factor=2.0, n_obs_per_night=2, snr_limit=None, velocity_col="velocity", **kwargs):
@@ -788,45 +802,54 @@ def run(self, sso_obs, orb, hval):


 class LightcurveInversionAsteroidMetric(BaseMoMetric):
-    """
-    This metric is generally applicable to NEOs and MBAs - inner solar system objects.
+    """Evaluate the likelihood that the detections could be used to enable
+    lightcurve inversion. This metric is generally applicable only to inner
+    solar system objects (NEOs, MBAs).

-    Determine if the cumulative sum of observations of a target are enough to enable lightcurve
-    inversion for shape modeling. 
For this to be true, multiple conditions need to be + Determine if the cumulative sum of observations of a target are + enough to enable lightcurve inversion for shape modeling. + For this to be true, multiple conditions need to be satisfied: - 1) The SNR-weighted number of observations (each observation is weighted by its SNR, up to a max of 100) - must be larger than the threshhold weight_det (default 50) - 2) Ecliptic longitudinal coverage needs to be at least 90 degrees, and the absolute deviation - needs to be at least 1/8th the longitudinal coverage. + 1) The SNR-weighted number of observations (each observation is weighted + by its SNR, up to a max of 100) must be larger than the + threshold weight_det (default 50) + 2) Ecliptic longitudinal coverage needs to be at least 90 degrees, + and the absolute deviation needs to be at least 1/8th the + longitudinal coverage. 3) The phase angle coverage needs to span at least 5 degrees. - For evaluation of condition 2, the median ecliptic longitude is subtracted from all longitudes, - and the modulo 360 of those values is taken. This ensures that the wrap around 360 is handled - correctly. + For evaluation of condition 2, the median ecliptic longitude is + subtracted from all longitudes, and the modulo 360 of those values + is taken. This ensures that the wrap around 360 is handled correctly. For more information on the above conditions, please see https://docs.google.com/document/d/1GAriM7trpTS08uanjUF7PyKALB2JBTjVT7Y6R30i0-8/edit?usp=sharing - Contributed by Steve Chesley, Wes Fraser, Josef Durech, and the inner solar system working group. + + Contributed by Steve Chesley, Wes Fraser, Josef Durech, and the + inner solar system working group. Parameters ---------- - weight_det: float, optional - The SNR-weighted number of detections required (per bandpass in any ONE of the filters in filterlist). + weight_det : `float`, optional + The SNR-weighted number of detections required (per bandpass in any + ONE of the filters in filterlist). Default 50. - snr_limit: float or None, optional - If snr_limit is set as a float, then requires object to be above snr_limit SNR in the image. - If snr_limit is None, this uses the probabilistic 'visibility' calculated by the vis stacker, + snr_limit : `float` or None, optional + If snr_limit is set as a float, then requires object to be + above snr_limit SNR in the image. + If snr_limit is None, this uses the probabilistic 'visibility' + calculated by the vis stacker, which means SNR ~ 5. Default is None. - snr_max: float, optional + snr_max : `float`, optional Maximum value toward the SNR-weighting to consider. Default 100. - filterlist: list of str, optional - The filters which the lightcurve inversion could be based on. Requirements must be met in one of - these filters. + filterlist : `list` [`str`], optional + The filters which the lightcurve inversion could be based on. + Requirements must be met in one of these filters. Returns ------- - int + metric_value : `int` 0 (could not perform lightcurve inversion) or 1 (could) """ @@ -856,9 +879,11 @@ def run(self, sso_obs, orb, hval): match = np.where(sso_obs[self.filter_col] == f) snr_sum = np.sum(clip_snr[match]) / self.snr_max if snr_sum < self.weight_det: - # Do not have enough SNR-weighted observations, so skip on to the next filter. + # Do not have enough SNR-weighted observations, + # so skip on to the next filter. continue - # Is the ecliptic longitude coverage for the visible observations sufficient? 
+ # Is the ecliptic longitude coverage for the visible + # observations sufficient? # Is the phase coverage sufficient? vis = np.where(clip_snr[match] > 0) ec_l = sso_obs["ecLon"][match][vis] @@ -869,7 +894,8 @@ def run(self, sso_obs, orb, hval): d_l = np.max(ec_l) - np.min(ec_l) # Calculate the range of the phase angle dp = np.max(phase_angle) - np.min(phase_angle) - # Metric requirement is that d_l >= 90 deg, absolute deviation is greater than d_l/8 + # Metric requirement is that d_l >= 90 deg, absolute + # deviation is greater than d_l/8 # and then that the phase coverage is more than 5 degrees. # Stop as soon as find a case where this is true. if d_l >= 90.0 and a_dev >= d_l / 8 and dp >= 5: @@ -879,40 +905,50 @@ def run(self, sso_obs, orb, hval): class ColorAsteroidMetric(BaseMoMetric): - """ - This metric is appropriate for MBAs and NEOs, and other inner solar system objects. - - The metric evaluates if the SNR-weighted number of observations are enough to - determine an approximate lightcurve and phase function -- and from this, - then a color for the asteroid can be determined. - The assumption is that you must fit the lightcurve/phase function in each bandpass, - and could do this well-enough if you have at least weight_det SNR-weighted observations - in the bandpass. - e.g. to find a g-r color, you must have 10 (SNR-weighted) obs in g and 10 in r. + """Calculate the likelihood of being able to calculate the color of an + object. This metric is appropriate for MBAs and NEOs, + and other inner solar system objects. + + The metric evaluates if the SNR-weighted number of observations are + enough to determine an approximate lightcurve and phase function -- + and from this, then a color for the asteroid can be determined. + The assumption is that you must fit the lightcurve/phase function + in each bandpass, and could do this well-enough if you have at least + weight_det SNR-weighted observations in the bandpass. + e.g. to find a g-r color, you must have 10 (SNR-weighted) obs in g + and 10 in r. For more details, see https://docs.google.com/document/d/1GAriM7trpTS08uanjUF7PyKALB2JBTjVT7Y6R30i0-8/edit?usp=sharing - Contributed by Wes Fraser, Steven Chesley & the inner solar system working group. + + Contributed by Wes Fraser, Steven Chesley + & the inner solar system working group. Parameters ---------- weight_det: float, optional - The SNR-weighted number of detections required (per bandpass in any ONE of the filters in filterlist). + The SNR-weighted number of detections required (per bandpass in any + ONE of the filters in filterlist). Default 10. snr_limit: float or None, optional - If snr_limit is set as a float, then requires object to be above snr_limit SNR in the image. - If snr_limit is None, this uses the probabilistic 'visibility' calculated by the vis stacker, + If snr_limit is set as a float, then requires object to be above + snr_limit SNR in the image. + If snr_limit is None, this uses the probabilistic 'visibility' + calculated by the vis stacker, which means SNR ~ 5. Default is None. snr_max: float, optional Maximum value toward the SNR-weighting to consider. Default 20. Returns ------- - int - An integer 'flag' that indicates whether the mean magnitude (and thus a color) was determined in: + flag : `int` + An integer 'flag' that indicates whether the mean magnitude + (and thus a color) was determined in: 0 = no bands - 1 = g and (r or i) and (z or y). i.e. obtain colors g-r or g-i PLUS g-z or g-y - 2 = Any 4 different filters (from grizy). i.e. 
colors = g-r, r-i, i-z, OR r-i, i-z, z-y.
+        3 = All 5 from grizy. i.e. colors g-r, r-i, i-z, z-y.
+        4 = All 6 filters (ugrizy) -- best possible! add u-g.
     """
@@ -944,14 +980,17 @@ def run(self, sso_obs, orb, hval):

         # Now assign a flag:
         # 0 = no bands
-        # 1 = g and (r or i) and (z or y).  i.e. obtain colors g-r or g-i PLUS g-z or g-y
-        # 2 = Any 4 different filters (from grizy).  i.e. colors = g-r, r-i, i-z, OR r-i, i-z, z-y..
+        # 1 = g and (r or i) and (z or y).
+        # i.e. obtain colors g-r or g-i PLUS g-z or g-y
+        # 2 = Any 4 different filters (from grizy).
+        # i.e. colors = g-r, r-i, i-z, OR r-i, i-z, z-y.
         # 3 = All 5 from grizy. i.e. colors g-r, r-i, i-z, z-y.
         # 4 = All 6 filters (ugrizy) -- best possible! add u-g.
         all_six = set(self.filterlist)
         good_five = set(["g", "r", "i", "z", "y"])

-        if len(filter_weight) == 0:  # this lets us stop evaluating here if possible.
+        if len(filter_weight) == 0:
+            # this lets us stop evaluating here if possible.
             flag = 0
         elif all_six.intersection(filter_weight) == all_six:
             flag = 4
@@ -974,38 +1013,50 @@ def run(self, sso_obs, orb, hval):


 class LightcurveColorOuterMetric(BaseMoMetric):
-    """
-    This metric is appropriate for outer solar system objects, such as TNOs and SDOs.
+    """Calculate the likelihood of being able to calculate a color and
+    lightcurve for outer solar system objects.

-    This metric evaluates whether the number of observations is sufficient to fit a lightcurve
-    in a primary and secondary bandpass. The primary bandpass requires more observations than
-    the secondary. Essentially, it's a complete lightcurve in one or both bandpasses, with at
+    This metric is appropriate for outer solar system objects,
+    such as TNOs and SDOs.
+
+    This metric evaluates whether the number of observations is
+    sufficient to fit a lightcurve in a primary and secondary bandpass.
+    The primary bandpass requires more observations than the secondary.
+    Essentially, it's a complete lightcurve in one or both bandpasses, with at
     least a semi-complete lightcurve in the secondary band.
-    The lightcurve/color can be calculated with any two of the bandpasses in filterlist.
+    The lightcurve/color can be calculated with any two of the
+    bandpasses in filterlist.
+
     Contributed by Wes Fraser.

     Parameters
     ----------
-    snr_limit: float or None, optional
-        If snr_limit is set as a float, then requires object to be above snr_limit SNR in the image.
-        If snr_limit is None, this uses the probabilistic 'visibility' calculated by the vis stacker,
+    snr_limit : `float` or None, optional
+        If snr_limit is set as a float, then requires object to be above
+        snr_limit SNR in the image.
+        If snr_limit is None, this uses the probabilistic 'visibility'
+        calculated by the vis stacker,
         which means SNR ~ 5.  Default is None.
-    num_req: int, optional
+    num_req : `int`, optional
        Number of observations required for a lightcurve fitting. Default 30.
-    num_sec_filt: int, optional
-        Number of observations required in a secondary band for color only. Default 20.
-    filterlist: list of str, optional
+    num_sec_filt : `int`, optional
+        Number of observations required in a secondary band for color only.
+        Default 20.
+    filterlist : `list` [`str`], optional
         Filters that the primary/secondary measurements can be in.
 Returns
     -------
-    int
+    flag : `int`
         A flag that indicates whether a color/lightcurve was generated in:
-        0 = no lightcurve (although may have had 'color' in one or more band)
-        1 = a lightcurve in a single filter (but no additional color information)
+        0 = no lightcurve
+        (although may have had 'color' in one or more band)
+        1 = a lightcurve in a single filter
+        (but no additional color information)
         2+ = lightcurves in more than one filter (or lightcurve + color)
-        e.g. lightcurve in 2 bands, with additional color information in another = 3.
+        e.g. lightcurve in 2 bands,
+        with additional color information in another = 3.
     """

     def __init__(
@@ -1046,31 +1097,35 @@ def run(self, sso_obs, orb, hval):


 class InstantaneousColorMetric(BaseMoMetric):
-    """Identify SSobjects which could have observations suitable to determine colors.
+    """Identify SSobjects which could have observations suitable to
+    determine instantaneous colors.

-    Generally, this is not the mode LSST would work in - the lightcurves of the objects
-    mean that the time interval would have to be quite short.
+    Generally, this is not the mode LSST would work in -
+    the lightcurves of the objects mean that the time interval would have to
+    be quite short.

-    This is roughly defined as objects which have more than n_pairs pairs of observations
-    with SNR greater than snr_limit, in bands bandOne and bandTwo, within n_hours.
+    This is roughly defined as objects which have more than n_pairs pairs
+    of observations with SNR greater than snr_limit,
+    in bands bandOne and bandTwo, within n_hours.

     Parameters
     ----------
-    n_pairs: int, optional
-        The number of pairs of observations (in each band) that must be within n_hours
-        Default 1
-    snr_limit: float, optional
+    n_pairs : `int`, optional
+        The number of pairs of observations (in each band) that must be
+        within n_hours. Default 1.
+    snr_limit : `float`, optional
         The SNR limit for the observations. Default 10.
-    n_hours: float, optional
-        The time interval between observations in the two bandpasses (hours). Default 0.5 hours.
-    b_one: str, optional
+    n_hours : `float`, optional
+        The time interval between observations in the two bandpasses (hours).
+        Default 0.5 hours.
+    b_one : `str`, optional
         The first bandpass for the color. Default 'g'.
-    b_two: str, optional
+    b_two : `str`, optional
         The second bandpass for the color. Default 'r'.

     Returns
     -------
-    int
+    flag : `int`
         0 (no color possible under these constraints) or 1 (color possible).
     """

@@ -1117,50 +1172,64 @@ def run(self, sso_obs, orb, hval):


 class KnownObjectsMetric(BaseMoMetric):
-    """Identify SSobjects which could be classified as 'previously known' based on their peak V magnitude.
-    This is most appropriate for NEO surveys, where most of the sky has been covered so the exact location
+    """Identify SSobjects which could be classified as 'previously known'
+    based on their peak V magnitude.
+    This is most appropriate for NEO surveys, where most of the sky has
+    been covered so the exact location
     (beyond being in the visible sky) is not as important.

     Default parameters tuned to match NEO survey capabilities.
     Returns the time at which each first reached that threshold V magnitude.
-    The default values are calibrated using the NEOs larger than 140m discovered in the last 20 years
-    and assuming a 30% completeness in 2017.
+    The default values are calibrated using the NEOs larger than 140m
+    discovered in the last 20 years and assuming a 30% completeness in 2017.
+
+    Note: the default parameters here were set up in ~2012, and are likely
+    out of date (potentially adding another epoch of discovery).

     Parameters
     -----------
-    elong_thresh : float, optional
-        The cutoff in solar elongation to consider an object 'visible'. Default 100 deg.
-    v_mag_thresh1 : float, optional
+    elong_thresh : `float`, optional
+        The cutoff in solar elongation to consider an object 'visible'.
+        Default 100 deg.
+    v_mag_thresh1 : `float`, optional
         The magnitude threshold for previously known objects. Default 20.0.
-    eff1 : float, optional
+    eff1 : `float`, optional
         The likelihood of actually achieving each individual input observation.
-        If the input observations include one observation per day, an 'eff' value of 0.3 would
-        mean that (on average) only one third of these observations would be achieved.
-        This is similar to the level for LSST, which can cover the visible sky every 3-4 days.
+        If the input observations include one observation per day,
+        an 'eff' value of 0.3 would mean that (on average) only one third
+        of these observations would be achieved. This is similar to the level
+        for LSST, which can cover the visible sky every 3-4 days.
         Default 0.1
-    t_switch1 : float, optional
-        The (MJD) time to switch between v_mag_thresh1 + eff1 to v_mag_thresh2 + eff2, e.g.
-        the end of the first period.
+    t_switch1 : `float`, optional
+        The (MJD) time to switch between v_mag_thresh1 + eff1 to
+        v_mag_thresh2 + eff2, e.g. the end of the first period.
         Default 53371 (2005).
-    v_mag_thresh2 : float, optional
+    v_mag_thresh2 : `float`, optional
         The magnitude threshhold for previously known objects. Default 22.0.
-        This is based on assuming PS and other surveys will be efficient down to V=22.
-    eff2 : float, optional
-        The efficiency of observations during the second period of time. Default 0.1
-    t_switch2 : float, optional
-        The (MJD) time to switch between v_mag_thresh2 + eff2 to v_mag_thresh3 + eff3.
+        This is based on assuming PS and other surveys will be efficient
+        down to V=22.
+    eff2 : `float`, optional
+        The efficiency of observations during the second period of time.
+        Default 0.1
+    t_switch2 : `float`, optional
+        The (MJD) time to switch between v_mag_thresh2 + eff2 to
+        v_mag_thresh3 + eff3.
         Default 57023 (2015).
-    v_mag_thresh3 : float, optional
-        The magnitude threshold during the third period. Default 22.0, based on PS1 + Catalina.
-    eff3 : float, optional
+    v_mag_thresh3 : `float`, optional
+        The magnitude threshold during the third period.
+        Default 22.0, based on PS1 + Catalina.
+    eff3 : `float`, optional
         The efficiency of observations during the third period. Default 0.1
-    t_switch3 : float, optional
-        The (MJD) time to switch between v_mag_thresh3 + eff3 to v_mag_thresh4 + eff4.
+    t_switch3 : `float`, optional
+        The (MJD) time to switch between v_mag_thresh3 + eff3
+        to v_mag_thresh4 + eff4.
         Default 59580 (2022).
-    v_mag_thresh4 : float, optional
-        The magnitude threshhold during the fourth (last) period. Default 22.0, based on PS1 + Catalina.
-    eff4 : float, optional
-        The efficiency of observations during the fourth (last) period. Default 0.2
+    v_mag_thresh4 : `float`, optional
+        The magnitude threshold during the fourth (last) period.
+        Default 22.0, based on PS1 + Catalina.
+    eff4 : `float`, optional
+        The efficiency of observations during the fourth (last) period. 
+        Default 0.2
     """

     def __init__(
@@ -1168,7 +1237,7 @@ def __init__(
         elong_thresh=100.0,
         v_mag_thresh1=20.0,
         eff1=0.1,
-        t_switch1=53371,  # XXX--maybe swap to survey_start_mjd and then delta_t's
+        t_switch1=53371,
         v_mag_thresh2=21.5,
         eff2=0.1,
         t_switch2=57023,
diff --git a/rubin_sim/maf/metrics/pair_metric.py b/rubin_sim/maf/metrics/pair_metric.py
index d282699a4..032f1180c 100644
--- a/rubin_sim/maf/metrics/pair_metric.py
+++ b/rubin_sim/maf/metrics/pair_metric.py
@@ -6,8 +6,17 @@


 class PairMetric(BaseMetric):
-    """
-    Count the number of pairs that could be used for Solar System object detection
+    """Count the number of pairs of visits that could be used for
+    Solar System object detection.
+
+    Parameters
+    ----------
+    match_min : `float`
+        Minimum number of minutes after the first observation for a
+        second visit to count as a match.
+    match_max : `float`
+        Maximum number of minutes after the first observation for a
+        second visit to count as a match.
+    bin_size : `float`
+        Bin size to use (minutes).
     """

     def __init__(
@@ -19,16 +28,6 @@ def __init__(
         bin_size=5.0,
         **kwargs,
     ):
-        """
-        Parameters
-        ----------
-        match_min : float (20.)
-            Minutes after first observation to count something as a match
-        match_max : float (40.)
-            Minutes after first observation to count something as a match
-        bin_size : float (5.)
-            bin_size to use (minutes)
-        """
         self.mjd_col = mjd_col
         self.bin_size = bin_size / 60.0 / 24.0
         self.match_min = match_min / 60.0 / 24.0
@@ -47,7 +46,8 @@ def run(self, data_slice, slice_point=None):
         nbin_max = np.round(self.match_max / self.bin_size)
         bins_to_check = np.arange(nbin_min, nbin_max + 1, 1)
         bins_w_obs = np.where(hist > 0)[0]
-        # now, for each bin with an observation, need to check if there is a bin
+        # now, for each bin with an observation,
+        # need to check if there is a bin
         # far enough ahead that is also populated.
         result = 0
         for binadd in bins_to_check:
diff --git a/rubin_sim/maf/metrics/periodic_detect_metric.py b/rubin_sim/maf/metrics/periodic_detect_metric.py
index 445bfe5e7..7cb4cbd42 100644
--- a/rubin_sim/maf/metrics/periodic_detect_metric.py
+++ b/rubin_sim/maf/metrics/periodic_detect_metric.py
@@ -9,30 +9,38 @@


 class PeriodicDetectMetric(BaseMetric):
-    """Determine if we would be able to classify an object as periodic/non-uniform, using an F-test
-    The idea here is that if a periodic source is aliased, it will be indistinguishable from a constant source,
-    so we can find a best-fit constant, and if the reduced chi-squared is ~1, we know we are aliased.
+    """Determine if we would be able to classify an object as
+    periodic/non-uniform, using an F-test.
+
+    The idea here is that if a periodic source is aliased, it will be
+    indistinguishable from a constant source,
+    so we can find a best-fit constant, and if the reduced chi-squared is ~1,
+    we know we are aliased.

     Parameters
     ----------
-
-    period : float (2) or array
-        The period of the star (days). Can be a single value, or an array. If an array, amplitude and starMag
+    period : `float` or `array`
+        The period of the star (days).
+        Can be a single value, or an array. If an array, amplitude and starMag
         should be arrays of equal length.
-    amplitude : floar (0.1)
-        The amplitude of the stellar variablility (mags).
-    starMag : float (20.)
+    amplitude : `float`
+        The amplitude of the stellar variability (mags).
+    starMag : `float`
         The mean magnitude of the star in r (mags).
-    sig_level : float (0.05)
-        The value to use to compare to the p-value when deciding if we can reject the null hypothesis. 
-    sed_template : str ('F')
-        The stellar SED template to use to generate realistic colors (default is an F star, so RR Lyrae-like)
+    sig_level : `float`
+        The value to use to compare to the p-value when deciding
+        if we can reject the null hypothesis.
+    sed_template : `str`
+        The stellar SED template to use to generate realistic colors
+        (default is an F star, so RR Lyrae-like)

     Returns
     -------
-
-    1 if we would detect star is variable, 0 if it is well-fit by a constant value. If using arrays to test multiple
-    period-amplitude-mag combinations, will be the sum of the number of detected stars.
+    flag : `int`
+        Returns 1 if we would detect that the star is variable,
+        0 if it is well-fit by a constant value.
+        If using arrays to test multiple period-amplitude-mag combinations,
+        will be the sum of the number of detected stars.
     """

     def __init__(
@@ -53,7 +61,8 @@ def __init__(
         self.filter_col = filter_col
         if np.size(periods) == 1:
             self.periods = [periods]
-            # Using the same magnitude for all filters. Could expand to fit the mean in each filter.
+            # Using the same magnitude for all filters.
+            # Could expand to fit the mean in each filter.
             self.star_mags = [star_mags]
             self.amplitudes = [amplitudes]
         else:
@@ -99,11 +108,14 @@ def run(self, data_slice, slice_point=None):
                 weights = 1.0 / (delta_m**2)
                 weighted_mean = np.sum(weights * lc) / np.sum(weights)
                 chi_sq_1 += np.sum(((lc - weighted_mean) ** 2 / delta_m**2))
-        # Yes, I'm fitting magnitudes rather than flux. At least I feel kinda bad about it.
-        # F-test for nested models Regression problems: https://en.wikipedia.org/wiki/F-test
+        # Yes, I'm fitting magnitudes rather than flux.
+        # At least I feel kinda bad about it.
+        # F-test for nested models Regression problems:
+        # https://en.wikipedia.org/wiki/F-test
         f_numerator = (chi_sq_1 - chi_sq_2) / (p2 - p1)
         f_denom = 1.0
-        # This is just reduced chi-squared for the more complicated model, so should be 1.
+        # This is just reduced chi-squared for the more
+        # complicated model, so should be 1.
         f_val = f_numerator / f_denom
         # Has DoF (p2-p1, n-p2)
         # https://stackoverflow.com/questions/21494141/how-do-i-do-a-f-test-in-python/21503346
diff --git a/rubin_sim/maf/metrics/phase_gap_metric.py b/rubin_sim/maf/metrics/phase_gap_metric.py
index 8ae527189..8ec0142f1 100644
--- a/rubin_sim/maf/metrics/phase_gap_metric.py
+++ b/rubin_sim/maf/metrics/phase_gap_metric.py
@@ -8,21 +8,26 @@


 class PhaseGapMetric(BaseMetric):
-    """
-    Measure the maximum gap in phase coverage for observations of periodic variables.
+    """Measure the maximum gap in phase coverage for
+    observations of periodic variables.

     Parameters
     ----------
-    col: str, optional
+    col : `str`, optional
         Name of the column to use for the observation times (MJD)
-    n_periods: int, optional
+    n_periods : `int`, optional
         Number of periods to test
-    period_min: float, optional
+    period_min : `float`, optional
         Minimum period to test, in days.
-    period_max: float, optional
+    period_max : `float`, optional
         Maximum period to test, in days
-    n_visits_min: int, optional
+    n_visits_min : `int`, optional
         Minimum number of visits necessary before looking for the phase gap.
+
+    Returns
+    -------
+    metric_value : `dict` {'periods': `float`, 'maxGaps': `float`}
+        A dictionary giving the maximum gap in phase coverage
+        at each test period.
     """

     def __init__(
@@ -91,8 +96,11 @@ def reduce_largest_gap(self, metric_val):
         return np.max(metric_val["maxGaps"])


-# To fit a periodic source well, you need to cover the full phase, and fit the amplitude. 
+# To fit a periodic source well, you need to cover the full phase,
+# and fit the amplitude.
 class PeriodicQualityMetric(BaseMetric):
+    """Evaluate phase coverage over a given period.
+    """
     def __init__(
         self,
         mjd_col="observationStartMJD",
@@ -111,7 +119,8 @@ def __init__(
         )

     def _calc_phase(self, data_slice):
-        """1 is perfectly balanced phase coverage, 0 is no effective coverage."""
+        """1 is perfectly balanced phase coverage, 0 is no effective coverage.
+        """
         angles = data_slice[self.mjd_col] % self.period
         angles = angles / self.period * 2.0 * np.pi
         x = np.cos(angles)
@@ -125,7 +134,9 @@ def _calc_phase(self, data_slice):
         return 1.0 - vector_off

     def _calc_amp(self, data_slice):
-        """Fractional SNR on the amplitude, testing for a variety of possible phases"""
+        """Fractional SNR on the amplitude,
+        testing for a variety of possible phases.
+        """
         phases = np.arange(0, np.pi, np.pi / 8.0)
         snr = m52snr(self.star_mag, data_slice[self.m5_col])
         amp_snrs = np.sin(data_slice[self.mjd_col] / self.period * 2 * np.pi + phases[:, np.newaxis]) * snr
diff --git a/rubin_sim/maf/metrics/qso_number_counts_metric.py b/rubin_sim/maf/metrics/qso_number_counts_metric.py
index be056a5da..6900c95aa 100644
--- a/rubin_sim/maf/metrics/qso_number_counts_metric.py
+++ b/rubin_sim/maf/metrics/qso_number_counts_metric.py
@@ -12,13 +12,16 @@


 class QSONumberCountsMetric(BaseMetric):
-    """
-    Calculate the number of quasars expected with SNR>=5 according to the Shen et al. (2020) QLF -
-    model A in the redshift range zmin < z < zmax. The 5 sigma depths are obtained using the ExgalM5 metric.
+    """Calculate the number of quasars expected with SNR>=5
+    according to the Shen et al. (2020) QLF - model A in the redshift
+    range zmin < z < zmax.
+
+    The 5 sigma depths are obtained using the ExgalM5 metric.
     Only quasars fainter than the saturation magnitude are counted.

-    By default, zmin is 0.3 and zmax is the minimum between 6.7 and the redshift at which the Lyman break
-    matches the effective wavelength of the band. For bands izy, zmax is 6.7. This default choice is to
+    By default, zmin is 0.3 and zmax is the minimum between 6.7 and the
+    redshift at which the Lyman break matches the effective wavelength
+    of the band. For bands izy, zmax is 6.7. This default choice is to
     match Table 10.2 for i-band quasar counts in the LSST Science book.
     """

@@ -47,7 +50,8 @@ def __init__(
             "y": 971.0,
         }

-        # Dust Extinction limit. Regions with larger extinction and dropped from the counting.
+        # Dust Extinction limit.
+        # Regions with larger extinction are dropped from the counting.
         self.extinction_cut = extinction_cut

         # Save the filter information.
@@ -55,9 +59,11 @@ def __init__(
         self.lsst_filter = lsst_filter

         # Save zmin and zmax, or set zmax to the default value.
-        # The default zmax is the lower number between 6.7 and the redshift at which the
-        # Lyman break (91.2nm) hits the effective wavelength of the filter.
-        # Note that this means that for i, z and y the default value for zmax is 6.7
+        # The default zmax is the lower number between 6.7 and the
+        # redshift at which the Lyman break (91.2nm) hits the
+        # effective wavelength of the filter. 
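+        # (e.g. for y, with effective wavelength 971.0 nm,
+        # 971.0 / 91.2 - 1 ~ 9.6, so the 6.7 cap applies.)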
+        # Note that this means that for i, z and y,
+        # the default value for zmax is 6.7
         self.zmin = zmin
         if zmax is None:
             zmax = np.min([6.7, self.effwavelen[self.lsst_filter] / 91.2 - 1.0])
@@ -71,8 +77,9 @@ def __init__(
         self.qlf_model = qlf_model
         self.sed_model = sed_model

-        # Read the long tables, which the number of quasars expected for a given band,
-        # qlf_module and qlf_model in a range of redshifts and magnitudes.
+        # Read the long tables, which give the number of quasars expected
+        # for a given band, qlf_module and qlf_model in a range of
+        # redshifts and magnitudes.
         table_name = "Long_Table.LSST{0}.{1}.{2}.{3}.txt".format(
             self.lsst_filter, self.qlf_module, self.qlf_model, self.sed_model
         )
@@ -90,7 +97,8 @@ def __init__(
         c_mz_data = np.cumsum(c_mz_data, axis=1)

         # Create a 2D interpolation object for the long table.
-        # self.nqso_cumulative = interpolate.interp2d(zs[:-1], mags[:-1], #c_mz_data[:-1, :-1], kind="cubic")
+        # self.nqso_cumulative = interpolate.interp2d(zs[:-1], mags[:-1],
+        # c_mz_data[:-1, :-1], kind="cubic")
         self.nqso_cumulative_aux = interpolate.RectBivariateSpline(
             zs[:-1], mags[:-1], c_mz_data[:-1, :-1].T, kx=3, ky=3
         )
diff --git a/rubin_sim/maf/metrics/scaling_metrics.py b/rubin_sim/maf/metrics/scaling_metrics.py
index cb25a1ca5..1589000f7 100644
--- a/rubin_sim/maf/metrics/scaling_metrics.py
+++ b/rubin_sim/maf/metrics/scaling_metrics.py
@@ -14,13 +14,13 @@ class NgalScaleMetric(BaseMetric):

     Parameters
     ----------
-    a_max : float (0.2)
+    a_max : `float`
         The maximum dust extinction to allow. Anything with higher dust
         extinction is considered to have zero usable galaxies.
-    m5min : float (26)
+    m5min : `float`
         The minimum coadded 5-sigma depth to allow. Anything less
         is considered to have zero usable galaxies.
-    filter : str ("i")
+    filter : `str`
         The filter to use. Any visits in other filters are ignored.
     """

@@ -55,7 +55,8 @@ def __init__(
         self.ax1 = dust_properties.ax1

     def run(self, data_slice, slice_point):
-        # I'm a little confused why there's a dust cut and an M5 cut, but whatever
+        # I'm a little confused why there's a dust cut and an M5 cut,
+        # but whatever
         a_x = self.ax1[data_slice[self.filter_col][0]] * slice_point["ebv"]
         if a_x > self.a_max:
             return 0
@@ -80,16 +81,18 @@ class NlcPointsMetric(BaseMetric):

     Parameters
     ----------
-    ndpmin : int (10)
+    ndpmin : `int`
         The number of points to demand on a lightcurve in a single filter
         to have that light curve qualify.
-    mags : float (21)
+    mags : `float`
         The magnitude of our fiducial object (maybe make it a dict in the
         future to support arbitrary colors).
-    maps : list of map objects (None)
-        List of stellar density maps to use. Default of None loads Trilegal maps.
-    nside : int (128)
-        The nside is needed to make sure the loaded maps match the slicer nside.
+    maps : `list` [`~rubin_sim.maf.map`] or None
+        List of stellar density maps to use.
+        Default of None loads Trilegal maps.
+    nside : `int`
+        The nside is needed to make sure the loaded maps
+        match the slicer nside.
     """

     def __init__(
diff --git a/rubin_sim/maf/metrics/season_metrics.py b/rubin_sim/maf/metrics/season_metrics.py
index f386990c4..229d0ad56 100644
--- a/rubin_sim/maf/metrics/season_metrics.py
+++ b/rubin_sim/maf/metrics/season_metrics.py
@@ -1,5 +1,6 @@
-"""A group of metrics that work together to evaluate season characteristics (length, number, etc).
-In addition, these supports the time delay metric calculation for strong lensing.
+"""A group of metrics that work together to evaluate season characteristics
+(length, number, etc). 
+In addition, these support the time delay metric for strong lensing. """ __all__ = ( "find_season_edges", @@ -22,13 +23,13 @@ def find_season_edges(seasons): Parameters ---------- - seasons: np.ndarray + seasons : `np.ndarray`, (N,) Seasons, such as calculated by calc_season. Note that seasons should be sorted!! Returns ------- - np.ndarray, np.ndarray + first, last : `np.ndarray`, (N,), `np.ndarray`, (N,) The indexes of the first and last date in the season. """ int_seasons = np.floor(seasons) @@ -41,17 +42,24 @@ def find_season_edges(seasons): class SeasonLengthMetric(BaseMetric): - """ - Calculate the length of LSST seasons, in days. + """Calculate the length of LSST seasons, in days. Parameters ---------- - min_exp_time: float, optional - Minimum visit exposure time to count for a 'visit', in seconds. Default 20. + min_exp_time : `float`, optional + Minimum visit exposure time to count for a 'visit', in seconds. + Default 20. reduce_func : function, optional - Function that can operate on array-like structures. Typically numpy function. - This reduces the season length in each season from 10 separate values to a single value. + Function that can operate on array-like structures. + Typically numpy function. + This reduces the season length in each season from 10 separate + values to a single value. Default np.median. + + Returns + ------- + seasonlength : `float` + The (reduceFunc) of the length of each season, in days. """ def __init__( @@ -73,28 +81,13 @@ def __init__( ) def run(self, data_slice, slice_point): - """Calculate the (reduceFunc) of the length of each season. - Uses the slice_point RA/Dec to calculate the position in question, then uses the times of the visits - to assign them into seasons (based on where the sun is relative to the slice_point RA). - - Parameters - ---------- - data_slice : numpy.array - Numpy structured array containing the data related to the visits provided by the slicer. - slice_point : dict - Dictionary containing information about the slice_point currently active in the slicer. - - Returns - ------- - float - The (reduceFunc) of the length of each season, in days. - """ # Order data Slice/times and exclude visits which are too short. long = np.where(data_slice[self.exp_time_col] > self.min_exp_time) if len(long[0]) == 0: return self.badval data = np.sort(data_slice[long], order=self.mjd_col) - # SlicePoints ra/dec are always in radians - convert to degrees to calculate season + # SlicePoints ra/dec are always in radians - + # convert to degrees to calculate season seasons = calc_season(np.degrees(slice_point["ra"]), data[self.mjd_col]) first_of_season, last_of_season = find_season_edges(seasons) seasonlengths = data[self.mjd_col][last_of_season] - data[self.mjd_col][first_of_season] @@ -103,7 +96,8 @@ def run(self, data_slice, slice_point): class CampaignLengthMetric(BaseMetric): - """Calculate the number of seasons (roughly, years) a pointing is observed for. + """Calculate the number of seasons (roughly, years) a pointing is observed. + This corresponds to the 'campaign length' for lensed quasar time delays. """ @@ -129,7 +123,9 @@ def run(self, data_slice, slice_point): class MeanCampaignFrequencyMetric(BaseMetric): - """Calculate the mean separation between nights, within a season - then the mean over the campaign. + """Calculate the mean separation between nights, within a season - + then the mean over the campaign. + Calculate per season, to avoid any influence from season gaps. 
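+
+    Examples
+    --------
+    The per-season quantity being averaged, sketched with invented night
+    numbers for a single season::
+
+        import numpy as np
+
+        nights = np.array([100, 103, 106, 119])
+        mean_separation = np.mean(np.diff(nights))  # (3 + 3 + 13) / 3 ~ 6.3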
""" @@ -154,7 +150,8 @@ def run(self, data_slice, slice_point): if len(long[0]) == 0: return self.badval data = np.sort(data_slice[long], order=self.mjd_col) - # SlicePoints ra/dec are always in radians - convert to degrees to calculate season + # SlicePoints ra/dec are always in radians - + # convert to degrees to calculate season seasons = calc_season(np.degrees(slice_point["ra"]), data[self.mjd_col]) first_of_season, last_of_season = find_season_edges(seasons) season_means = np.zeros(len(first_of_season), float) @@ -168,49 +165,54 @@ def run(self, data_slice, slice_point): class TdcMetric(BaseMetric): - """Calculate the Time Delay Challenge metric, as described in Liao et al 2015 - (https://arxiv.org/pdf/1409.1254.pdf). + """Calculate the Time Delay Challenge metric, + as described in Liao et al 2015 (https://arxiv.org/pdf/1409.1254.pdf). - This combines the MeanCampaignFrequency/MeanNightSeparation, the SeasonLength, and the CampaignLength + This combines the MeanCampaignFrequency/MeanNightSeparation, + the SeasonLength, and the CampaignLength metrics above, but rewritten to calculate season information only once. cad_norm = in units of days sea_norm = in units of months camp_norm = in units of years - This metric also adds a requirement to achieve limiting magnitudes after galactic dust extinction, - in various bandpasses, in order to exclude visits which are not useful for detecting quasars - (due to being short or having high sky brightness, etc.) and to reject regions with - high galactic dust extinction. + This metric also adds a requirement to achieve limiting magnitudes + after galactic dust extinction, in various bandpasses, + in order to exclude visits which are not useful for detecting quasars + (due to being short or having high sky brightness, etc.) and to + reject regions with high galactic dust extinction. Parameters ---------- - mjd_col: str, optional + mjd_col : `str`, optional Column name for mjd. Default observationStartMJD. - night_col: str, optional + night_col : `str`, optional Column name for night. Default night. - filter_col: str, optional + filter_col : `str`, optional Column name for filter. Default filter. - m5_col: str, optional + m5_col : `str`, optional Column name for five-sigma depth. Default fiveSigmaDepth. - mag_cuts: dict, optional - Dictionary with filtername:mag limit (after dust extinction). Default None in kwarg. - Defaults set within metric: {'u': 22.7, 'g': 24.1, 'r': 23.7, 'i': 23.1, 'z': 22.2, 'y': 21.4} - metricName: str, optional + mag_cuts : `dict`, optional + Dictionary with filtername:mag limit (after dust extinction). + Default None in kwarg. + Defaults set within metric: + {'u': 22.7, 'g': 24.1, 'r': 23.7, 'i': 23.1, 'z': 22.2, 'y': 21.4} + metricName : `str`, optional Metric Name. Default TDC. - cad_norm: float, optional + cad_norm : `float`, optional Cadence normalization constant, in units of days. Default 3. - sea_norm: float, optional + sea_norm : `float`, optional Season normalization constant, in units of months. Default 4. - camp_norm: float, optional + camp_norm : `float`, optional Campaign length normalization constant, in units of years. Default 5. - badval: float, optional + badval : `float`, optional Return this value instead of the dictionary for bad points. Returns ------- - dictionary - Dictionary of values for {'rate', 'precision', 'accuracy'} at this point in the sky. + TDCmetrics : `dict` + Dictionary of values for {'rate', 'precision', 'accuracy'} + at this point in the sky. 
""" def __init__( @@ -250,7 +252,8 @@ def __init__( raise Exception("mag_cuts should be a dictionary") # Set up dust map requirement maps = ["DustMap"] - # Set the default wavelength limits for the lsst filters. These are approximately correct. + # Set the default wavelength limits for the lsst filters. + # These are approximately correct. dust_properties = DustValues() self.ax1 = dust_properties.ax1 super().__init__( @@ -275,7 +278,8 @@ def run(self, data_slice, slice_point): if len(idxs[0]) == 0: return self.badval data = np.sort(data_slice[idxs], order=self.mjd_col) - # SlicePoints ra/dec are always in radians - convert to degrees to calculate season + # SlicePoints ra/dec are always in radians - + # convert to degrees to calculate season seasons = calc_season(np.degrees(slice_point["ra"]), data[self.mjd_col]) int_seasons = np.floor(seasons) first_of_season, last_of_season = find_season_edges(seasons) diff --git a/rubin_sim/maf/metrics/simple_metrics.py b/rubin_sim/maf/metrics/simple_metrics.py index 134f12cad..2621f51e4 100644 --- a/rubin_sim/maf/metrics/simple_metrics.py +++ b/rubin_sim/maf/metrics/simple_metrics.py @@ -290,7 +290,9 @@ def run(self, data_slice, slice_point=None): class MaxPercentMetric(BaseMetric): - """Return the percent of data which matches the maximum value of the data.""" + """Return the percent of data which matches the maximum value + of the data. + """ def run(self, data_slice, slice_point=None): n_max = np.size(np.where(data_slice[self.colname] == np.max(data_slice[self.colname]))[0]) @@ -324,8 +326,9 @@ class FracAboveMetric(BaseMetric): """Find the fraction of data values above a given `cutoff`.""" def __init__(self, col=None, cutoff=0.5, scale=1, metric_name=None, **kwargs): - # Col could just get passed in bundle with kwargs, but by explicitly pulling it out - # first, we support use cases where class instantiated without explicit 'col='). + # Col could just get passed in bundle with kwargs, + # by explicitly pulling it out first, we support use cases where + # class instantiated without explicit 'col='). if metric_name is None: metric_name = "FracAbove %.2f in %s" % (cutoff, col) super(FracAboveMetric, self).__init__(col, metric_name=metric_name, **kwargs) diff --git a/rubin_sim/maf/metrics/slew_metrics.py b/rubin_sim/maf/metrics/slew_metrics.py deleted file mode 100644 index 7484079f3..000000000 --- a/rubin_sim/maf/metrics/slew_metrics.py +++ /dev/null @@ -1,71 +0,0 @@ -__all__ = ("SlewContributionMetric", "AveSlewFracMetric") - -import numpy as np - -from .base_metric import BaseMetric - -# Metrics for dealing with things from the SlewActivities table - - -class SlewContributionMetric(BaseMetric): - def __init__( - self, col="actDelay", activity=None, active_col="activity", in_crit_col="inCriticalPath", **kwargs - ): - """ - Return the average time, multiplied by fraction of slew -- - considering critical path activities only. - """ - self.col = col - self.in_crit_col = in_crit_col - col = [col, in_crit_col] - col.append(active_col) - self.active_col = active_col - self.activity = activity - super(SlewContributionMetric, self).__init__(col=col, **kwargs) - self.comment = "Average time for %s activity (in seconds) when in the critical path, " % (activity) - self.comment += "multiplied by the percent of total slews in the critical path." - - def run(self, data_slice, slice_point=None): - # Activities of this type, in critical path. 
- good_in_crit = np.where( - (data_slice[self.active_col] == self.activity) & (data_slice[self.in_crit_col] == "True") - )[0] - if len(good_in_crit) == 0: - result = 0.0 - else: - # All activities in critical path. - in_crit = np.where((data_slice[self.in_crit_col] == "True"))[0] - # Calculate fraction of total in-critical-path slew activities that this activity represents. - result = np.sum(data_slice[self.col][good_in_crit]) / np.sum(data_slice[self.col][in_crit]) - # and multiply by the mean time required by this activity. - result *= np.mean(data_slice[self.col][good_in_crit]) - return result - - -class AveSlewFracMetric(BaseMetric): - def __init__( - self, col="actDelay", activity=None, active_col="activity", id_col="SlewHistory_slewCount", **kwargs - ): - """ - Return the average time multiplied by fraction of slews. - """ - self.col = col - self.id_col = id_col - col = [col, id_col] - col.append(active_col) - self.active_col = active_col - self.activity = activity - super(AveSlewFracMetric, self).__init__(col=col, **kwargs) - self.comment = "Average time for %s activity (in seconds), multiplied by percent of total slews." % ( - activity - ) - - def run(self, data_slice, slice_point=None): - good = np.where(data_slice[self.active_col] == self.activity)[0] - if len(good) == 0: - result = 0.0 - else: - result = np.mean(data_slice[self.col][good]) - nslews = np.size(np.unique(data_slice[self.id_col])) - result = result * np.size(good) / np.float(nslews) - return result diff --git a/rubin_sim/maf/metrics/sn_cadence_metric.py b/rubin_sim/maf/metrics/sn_cadence_metric.py index 580d91ca1..201ae2cd7 100644 --- a/rubin_sim/maf/metrics/sn_cadence_metric.py +++ b/rubin_sim/maf/metrics/sn_cadence_metric.py @@ -7,16 +7,17 @@ class SNCadenceMetric(metrics.BaseMetric): """ - Metric to estimate the redshift limit for faint supernovae (x1,color) = (-2.0,0.2) + Metric to estimate the redshift limit for faint supernovae + (x1,color) = (-2.0,0.2) Parameters ---------- - list : str, optional + list : `str`, optional Name of the columns used to estimate the metric - coadd : bool, optional + coadd : `bool`, optional to make "coaddition" per night (uses snStacker) Default True - lim_sn : class, optional + lim_sn : `class`, optional Reference data used to estimate redshift values (interpolation) """ diff --git a/rubin_sim/maf/metrics/sn_n_sn_metric.py b/rubin_sim/maf/metrics/sn_n_sn_metric.py index c7103e456..c7edf7244 100644 --- a/rubin_sim/maf/metrics/sn_n_sn_metric.py +++ b/rubin_sim/maf/metrics/sn_n_sn_metric.py @@ -69,7 +69,8 @@ class SNNSNMetric(BaseMetric): dust : `bool`, opt Apply dust extinction to visit depth values (default False) hard_dust_cut : `float`, opt - If set, cut any point on the sky that has an ebv extinction higher than the hard_dust_cut value. + If set, cut any point on the sky that has an ebv extinction + higher than the hard_dust_cut value. 
        Default 0.25
     """

@@ -221,14 +222,14 @@ def run(self, data_slice, slice_point):

         Parameters
         --------------
-        data_slice: `np.array`
+        data_slice : `np.ndarray`
            Observations to process (scheduler simulations)
-        slice_point: `bool`, opt
+        slice_point : `dict`, opt
            Information about the location on the sky from the slicer

         Returns
         -------
-        metricVal : `np.recarray`
+        metricVal : `np.ndarray`
            ['n_sn', 'zlim'] at this point on the sky
         """
         # Hard dust cut
@@ -264,7 +265,8 @@ def run(self, data_slice, slice_point):
         if len(data_slice) <= self.n_aft + self.n_bef:
             return self.badval

-        # get season information (seasons calculated by gaps, not by place on sky)
+        # get season information (seasons calculated by gaps,
+        # not by place on sky)
         data_slice = self.getseason(data_slice, mjd_col=self.mjd_col)

         # get redshift values per season
@@ -326,17 +328,17 @@ def season_length(self, seasons, data_slice, zseason):
         Method to estimate season lengths vs z

         Parameters
-        ---------------
+        -----------
-        seasons : list(int)
+        seasons : `list` [`int`]
            list of seasons to process
-        data_slice: numpy array
+        data_slice : `np.ndarray`, (N,)
            array of observations

         Returns
-        -----------
+        --------
-        seasons : list(int)
+        seasons : `list` [`int`]
            list of seasons to process
-        dur_z : pandas df
+        dur_z : `pandas.DataFrame`
            season lengths vs z
         """
         # if seasons = -1: process the seasons seen in data
@@ -368,15 +370,15 @@ def get_season_info(self, dfa, zseason, min_duration=60.0):

         Parameters
         --------------
-        dfa: pandas df
+        dfa : pandas df
            data to process
-        zseason: pandas df
+        zseason : pandas df
            redshift infos per season
-        min_duration: float, opt
+        min_duration : `float`, opt
            min season length to be accepted (default: 60 days)

         Returns
-        ----------
+        --------
        pandas df with season length infos

@@ -406,13 +408,13 @@ def step_lc(self, obs, gen_par, x1=-2.0, color=0.2):

         Parameters
         ---------------
-        obs: array
+        obs : array
            observations
-        gen_par: array
+        gen_par : array
            simulation parameters
-        x1: float, opt
+        x1 : `float`, opt
            stretch value (default: -2.0)
-        color: float, opt
+        color : `float`, opt
            color value (default: 0.2)

         Returns
@@ -437,9 +439,6 @@ def step_efficiencies(self, lc):
            pandas df with efficiencies

         """
-        # sn_effis = lc.groupby(['healpixID', 'season', 'z', 'x1', 'color', 'sntype']).apply(
-        #    lambda x: self.sn_effi(x)).reset_index()
-
         sn_effis = (
             lc.groupby(["season", "z", "x1", "color", "sntype"])
             .apply(lambda x: self.sn_effi(x))
@@ -476,14 +475,14 @@ def step_nsn(self, sn_effis, dur_z):
         Method to estimate the number of supernovae from efficiencies

         Parameters
-        ---------------
+        ----------
-        sn_effis: pandas df
+        sn_effis : pandas df
            data with efficiencies of observation
-        dur_z: array
+        dur_z : array
            array of season length

         Returns
-        ----------
+        -------
        initial sn_effis appended with a set of infos (duration, nsn)

@@ -501,8 +500,8 @@ def season_info(self, grp, min_duration):

         Parameters
         --------------
-        grp: pandas df group
-        min_duration: float
+        grp : pandas df group
+        min_duration : `float`
            minimal duration for a season to be considered

         Returns
@@ -543,9 +542,9 @@ def duration_z(self, grp, min_duration=60.0):

         Parameters
         --------------
-        grp: pandas df group
+        grp : pandas df group
            data to process: season infos
-        min_duration: float, opt
+        min_duration : `float`, opt
            min season length for a season to be processed (default: 60 days)

         Returns
@@ -581,11 +580,11 @@ def calc_daymax(self, grp, daymax_step):

         Parameters
         --------------
        grp: group (pandas df sense)
-            group of data to process with the following
cols: - t0_min: T0 min value (per season) - t0_max: T0 max value (per season) - daymax_step: float - step for T0 simulation + group of data to process with the following cols: + t0_min: T0 min value (per season) + t0_max: T0 max value (per season) + daymax_step: `float` + step for T0 simulation Returns ---------- @@ -615,13 +614,13 @@ def gen_lc(self, grp, gen_par_orig, x1, color): Parameters --------------- - grp: pandas group + grp : pandas group observations to process - gen_par_orig: pandas df + gen_par_orig : pandas df simulation parameters - x1: float + x1 : `float` SN stretch - color: float + color : `float` SN color Returns @@ -656,7 +655,7 @@ def sn_effi(self, lc): Parameters --------------- - lc: pandas grp + lc : pandas grp light curve Returns @@ -721,13 +720,13 @@ def get_sum(self, lcarr, varname, nvals, flag): Parameters -------------- - lcarr: numpy array + lcarr : numpy array data to process - varname: str + varname : `str` col to process in lcarr - nvals: int + nvals : `int` dimension for tiling - flag: array(bool) + flag : array(bool) flag to apply Returns @@ -748,11 +747,11 @@ def get_epochs(self, nights, flag, flagph): Parameters --------------- - nights: array + nights : array night number array - flag: array(bool) + flag : array(bool) flag to apply - flagph: array(bool) + flagph : array(bool) flag to apply Returns @@ -1316,7 +1315,8 @@ def get_nsn(self, effi, durinterp_z, zmin, zmax, zstep): def check_dur_z(self, dur_z, nmin=2): """ " - Method to remove seasons with a poor redshift range due to too low season length + Method to remove seasons with a poor redshift range due + to too low season length Parameters ---------------- diff --git a/rubin_sim/maf/plots/spatial_plotters.py b/rubin_sim/maf/plots/spatial_plotters.py index b0f15ef8d..02fae6b07 100644 --- a/rubin_sim/maf/plots/spatial_plotters.py +++ b/rubin_sim/maf/plots/spatial_plotters.py @@ -145,16 +145,18 @@ def __call__(self, metric_value_in, slicer, user_plot_dict, fignum=None): """ Parameters ---------- - metric_value : numpy.ma.MaskedArray - slicer : rubin_sim.maf.slicers.HealpixSlicer - user_plot_dict: dict - Dictionary of plot parameters set by user (overrides default values). - fignum : int - Matplotlib figure number to use (default = None, starts new figure). + metric_value : `numpy.ma.MaskedArray` + slicer : `rubin_sim.maf.slicers.HealpixSlicer` + user_plot_dict: `dict` + Dictionary of plot parameters set by user + (overrides default values). + fignum : `int` + Matplotlib figure number to use + (default = None, starts new figure). Returns ------- - int + fignum : `int` Matplotlib figure number used to create the plot. """ # Override the default plotting parameters with user specified values. @@ -248,7 +250,8 @@ def __call__(self, metric_value_in, slicer, user_plot_dict, fignum=None): visufunc_params.update(self.healpy_visufunc_params) self.healpy_visufunc(metric_value.filled(badval), **visufunc_params) - # Add colorbar (not using healpy default colorbar because we want more tickmarks). + # Add colorbar + # (not using healpy default colorbar because we want more tickmarks). self.ax = plt.gca() im = self.ax.get_images()[0] @@ -259,12 +262,12 @@ def __call__(self, metric_value_in, slicer, user_plot_dict, fignum=None): # Add label. if plot_dict["label"] is not None: plt.figtext(0.8, 0.8, "%s" % (plot_dict["label"])) - # Make a color bar. Supress silly colorbar warnings. + # Make a color bar. Suppress excessive colorbar warnings. 
with warnings.catch_warnings():
             warnings.simplefilter("ignore")
             # The vertical colorbar is primarily aimed at the movie
             # but may be useful for other purposes
-            if plot_dict["extend"] is not "neither":
+            if plot_dict["extend"] != "neither":
                 extendrect = False
             else:
                 extendrect = True
@@ -315,8 +318,8 @@ def __init__(self):
         self.default_plot_dict.update({"maxl": None, "removeDipole": True, "linestyle": "-"})

     def __call__(self, metric_value, slicer, user_plot_dict, fignum=None):
-        """
-        Generate and plot the power spectrum of metric_value (calculated on a healpix grid).
+        """Generate and plot the power spectrum of metric_values
+        (for metrics calculated on a healpix grid).
         """
         if "Healpix" not in slicer.slicer_name:
             raise ValueError("HealpixPowerSpectrum for use with healpix metricBundles.")
@@ -361,7 +364,8 @@ def __call__(self, metric_value, slicer, user_plot_dict, fignum=None):
         plt.tick_params(axis="y", labelsize=plot_dict["labelsize"])
         if plot_dict["title"] is not None:
             plt.title(plot_dict["title"])
-        # Return figure number (so we can reuse/add onto/save this figure if desired).
+        # Return figure number
+        # (so we can reuse/add onto/save this figure if desired).
         return fig.number


@@ -384,8 +388,7 @@ def __init__(self):
         self.base_hist = BaseHistogram()

     def __call__(self, metric_value, slicer, user_plot_dict, fignum=None):
-        """
-        Histogram metric_value for all healpix points.
+        """Histogram metric_value for all healpix points.
         """
         if "Healpix" not in slicer.slicer_name:
             raise ValueError("HealpixHistogram is for use with healpix slicer.")
diff --git a/rubin_sim/maf/stackers/coord_stackers.py b/rubin_sim/maf/stackers/coord_stackers.py
index 1605184ff..b3cf93ac6 100644
--- a/rubin_sim/maf/stackers/coord_stackers.py
+++ b/rubin_sim/maf/stackers/coord_stackers.py
@@ -13,28 +13,28 @@ def ra_dec2_alt_az(ra, dec, lat, lon, mjd, altonly=False):
     """Convert RA/Dec (and telescope site lat/lon) to alt/az.

-    This uses simple equations and ignores aberation, precession, nutation, etc.
+    This uses simple equations and ignores aberration, precession, nutation.

     Parameters
     ----------
-    ra : array_like
+    ra : `np.ndarray`, (N,)
         RA, in radians.
-    dec : array_like
+    dec : `np.ndarray`, (N,)
         Dec, in radians. Must be same length as `ra`.
-    lat : float
+    lat : `float`
         Latitude of the observatory in radians.
-    lon : float
+    lon : `float`
         Longitude of the observatory in radians.
-    mjd : float
+    mjd : `float`
         Modified Julian Date.
-    altonly : bool, optional
+    altonly : `bool`, optional
         Calculate altitude only.

     Returns
     -------
-    alt : numpy.array
+    alt : `np.ndarray`, (N,)
         Altitude, same length as `ra` and `dec`. Radians.
-    az : numpy.array
+    az : `np.ndarray`, (N,)
         Azimuth, same length as `ra` and `dec`. Radians.
     """
     lmst = calc_lmst(mjd, lon)
diff --git a/rubin_sim/maf/utils/generate_fov_map.py b/rubin_sim/maf/utils/generate_fov_map.py
index eabea3b11..36afda59b 100644
--- a/rubin_sim/maf/utils/generate_fov_map.py
+++ b/rubin_sim/maf/utils/generate_fov_map.py
@@ -2,6 +2,7 @@
 from rubin_scheduler.utils import gnomonic_project_tosky, gnomonic_project_toxy

 # Use the main stack to make a rough array.
+# This code needs an update to work without lsst.sims.
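For reference, the "simple equations" that ra_dec2_alt_az describes amount to standard spherical trigonometry. A sketch, under the assumption that the local mean sidereal time (lmst, in radians) is already known; this is illustrative, not the stacker's exact code::

    import numpy as np

    def simple_alt_az(ra, dec, lat, lmst):
        # Hour angle of the target.
        ha = lmst - ra
        # Altitude from the standard spherical triangle relation.
        sin_alt = np.sin(dec) * np.sin(lat) + np.cos(dec) * np.cos(lat) * np.cos(ha)
        alt = np.arcsin(sin_alt)
        # Azimuth measured from north, increasing toward east.
        az = np.arctan2(
            -np.sin(ha) * np.cos(dec),
            np.sin(dec) * np.cos(lat) - np.cos(dec) * np.sin(lat) * np.cos(ha),
        )
        return alt, az % (2.0 * np.pi)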
if __name__ == "__main__":
     import lsst.sims.utils as simsUtils

diff --git a/rubin_sim/maf/web/maf_run_results.py b/rubin_sim/maf/web/maf_run_results.py
index d4fd42f5b..39fd6fcab 100644
--- a/rubin_sim/maf/web/maf_run_results.py
+++ b/rubin_sim/maf/web/maf_run_results.py
@@ -177,7 +177,6 @@ def metric_ids_to_metrics(self, metric_ids, metrics=None):
         """
         if metrics is None:
             metrics = self.metrics
-        # this should be faster with pandas (and self.metrics.query('metric_id in @metric_ids'))
         metrics = metrics[np.in1d(metrics["metric_id"], metric_ids)]
         return metrics

@@ -449,7 +448,8 @@ def plot_dict(self, plots=None):
         Given an array of plots (for a single metric usually).
         Returns an ordered dict with 'plot_type' for interfacing with jinja2 templates.

-        plot_dict == {'SkyMap': {'plot_file': [], 'thumb_file', []}, 'Histogram': {}..}
+        plot_dict ==
+        {'SkyMap': {'plot_file': [], 'thumb_file': []}, 'Histogram': {}..}

         If no plot of a particular type, the plot_file and thumb_file are
         empty lists.

From 32deb6515e9569aee801ab950083dbdbd90eec04 Mon Sep 17 00:00:00 2001
From: Lynne Jones
Date: Fri, 2 Feb 2024 14:49:41 -0800
Subject: [PATCH 18/26] Include change requested by @thalos23 in
 tickets/OPSIM-1048

---
 rubin_sim/maf/metrics/crowding_metric.py | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/rubin_sim/maf/metrics/crowding_metric.py b/rubin_sim/maf/metrics/crowding_metric.py
index 4e3e7caff..d76a494c2 100644
--- a/rubin_sim/maf/metrics/crowding_metric.py
+++ b/rubin_sim/maf/metrics/crowding_metric.py
@@ -239,8 +239,13 @@ def run(self, data_slice, slice_point=None):
         mag_vector = slice_point[f"starMapBins_{self.filtername}"][1:]
         lum_func = slice_point[f"starLumFunc_{self.filtername}"]
         # Magnitude uncertainty given crowding
+        # Use minimum here, however this may not be appropriate in all cases.
+        # (minimum makes value here match MagCrowding above, however
+        # the minimum seeing could also correlate with poor m5 values)
+        # Likely there should be some comparison between errors from crowding
+        # and errors from photometric noise that we're just not doing.
         dmag_crowd = _comp_crowd_error(
-            mag_vector, lum_func, data_slice[self.seeing_col], single_mag=self.rmag
+            mag_vector, lum_func, min(data_slice[self.seeing_col]), single_mag=self.rmag
         )
         result = np.mean(dmag_crowd)
         return result

From 48e4dcfaafd17c26b7a3bdf984404893ab54fcc2 Mon Sep 17 00:00:00 2001
From: Lynne Jones
Date: Fri, 2 Feb 2024 23:33:14 -0800
Subject: [PATCH 19/26] isort

---
 rubin_sim/maf/maf_contrib/__init__.py | 2 +-
 .../maf_contrib/calculate_lsst_field_visibility_astropy.py | 4 +---
 rubin_sim/maf/maf_contrib/star_counts/starcount.py | 1 +
 rubin_sim/maf/maf_contrib/star_counts/starcount_bymass.py | 2 +-
 rubin_sim/maf/maf_contrib/transient_ascii_sed_metric.py | 2 +-
 5 files changed, 5 insertions(+), 6 deletions(-)

diff --git a/rubin_sim/maf/maf_contrib/__init__.py b/rubin_sim/maf/maf_contrib/__init__.py
index 2c9235f63..251546c83 100644
--- a/rubin_sim/maf/maf_contrib/__init__.py
+++ b/rubin_sim/maf/maf_contrib/__init__.py
@@ -1,5 +1,6 @@
 # Add similar lines (from .filename import *) when you add new metrics,
 # stackers or slicers.
+from .calculate_lsst_field_visibility_astropy import * from .depth_limited_num_gal_metric import * from .filter_pair_t_gaps_metric import * from .grb_transient_metric import * @@ -21,4 +22,3 @@ from .var_depth_metric import * from .xrb_metrics import * from .young_stellar_objects_metric import * -from .calculate_lsst_field_visibility_astropy import * diff --git a/rubin_sim/maf/maf_contrib/calculate_lsst_field_visibility_astropy.py b/rubin_sim/maf/maf_contrib/calculate_lsst_field_visibility_astropy.py index dcd9404f4..9edd457f7 100644 --- a/rubin_sim/maf/maf_contrib/calculate_lsst_field_visibility_astropy.py +++ b/rubin_sim/maf/maf_contrib/calculate_lsst_field_visibility_astropy.py @@ -10,9 +10,7 @@ import numpy as np from astropy.coordinates import AltAz, EarthLocation, SkyCoord, get_sun from astropy.time import Time, TimeDelta - -from rubin_scheduler.utils import Site -from rubin_scheduler.utils import approx_ra_dec2_alt_az +from rubin_scheduler.utils import Site, approx_ra_dec2_alt_az def calculate_lsst_field_visibility( diff --git a/rubin_sim/maf/maf_contrib/star_counts/starcount.py b/rubin_sim/maf/maf_contrib/star_counts/starcount.py index c7e8bcdbc..787ccfbf9 100644 --- a/rubin_sim/maf/maf_contrib/star_counts/starcount.py +++ b/rubin_sim/maf/maf_contrib/star_counts/starcount.py @@ -7,6 +7,7 @@ # between a given set of distances. For use with Field Star Count metric import numpy as np + from . import coords, stellardensity skyarea = 41253.0 diff --git a/rubin_sim/maf/maf_contrib/star_counts/starcount_bymass.py b/rubin_sim/maf/maf_contrib/star_counts/starcount_bymass.py index 422b1d1e8..318757820 100644 --- a/rubin_sim/maf/maf_contrib/star_counts/starcount_bymass.py +++ b/rubin_sim/maf/maf_contrib/star_counts/starcount_bymass.py @@ -10,7 +10,7 @@ import numpy as np -from scipy.optimize import newton +from scipy.optimize import newton from . 
import abs_mag, spec_type from .starcount import starcount diff --git a/rubin_sim/maf/maf_contrib/transient_ascii_sed_metric.py b/rubin_sim/maf/maf_contrib/transient_ascii_sed_metric.py index 296d0c76d..39ae1fcf2 100644 --- a/rubin_sim/maf/maf_contrib/transient_ascii_sed_metric.py +++ b/rubin_sim/maf/maf_contrib/transient_ascii_sed_metric.py @@ -19,7 +19,7 @@ from sncosmo import Model, TimeSeriesSource, read_griddata_ascii except ImportError: pass -from astropy.cosmology import Planck15 as cosmo ## noqa N813 +from astropy.cosmology import Planck15 as cosmo # # noqa N813 from rubin_sim.maf.metrics import BaseMetric from rubin_sim.maf.utils import m52snr From 4249f7524ddd48ea963cc9e86a8f2b301aeada51 Mon Sep 17 00:00:00 2001 From: Lynne Jones Date: Fri, 2 Feb 2024 23:34:46 -0800 Subject: [PATCH 20/26] Temporarily pin pytest and black --- test-requirements.txt | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/test-requirements.txt b/test-requirements.txt index fd4670329..2a2721707 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -1,5 +1,6 @@ -pytest +pytest<8.0.0 +black>=23.0.0,<24.0.0 +ruff pytest-cov pytest-black -black -ruff + From b15bfe83014b7ec2c7c820ca8680b163940de21d Mon Sep 17 00:00:00 2001 From: Lynne Jones Date: Sat, 3 Feb 2024 00:42:11 -0800 Subject: [PATCH 21/26] Black black --- .github/workflows/run_all_tests.yaml | 65 ------------------- .../lv_dwarfs/lv_dwarfs_metrics.py | 2 +- .../maf/maf_contrib/star_counts/coords.py | 1 - .../maf/maf_contrib/star_counts/starcount.py | 1 - rubin_sim/maf/metrics/calibration_metrics.py | 4 +- rubin_sim/maf/metrics/phase_gap_metric.py | 7 +- rubin_sim/maf/plots/spatial_plotters.py | 3 +- 7 files changed, 6 insertions(+), 77 deletions(-) delete mode 100644 .github/workflows/run_all_tests.yaml diff --git a/.github/workflows/run_all_tests.yaml b/.github/workflows/run_all_tests.yaml deleted file mode 100644 index 34f69d353..000000000 --- a/.github/workflows/run_all_tests.yaml +++ /dev/null @@ -1,65 +0,0 @@ -name: Run All of the Tests - -on: - push: - branches: - - main - tags: - - "*" - workflow_dispatch: - -jobs: - AllTests: - name: Run All Tests (${{ matrix.python-version }}, ${{ matrix.os }}) - runs-on: ${{ matrix.os }} - strategy: - fail-fast: True - matrix: - os: ["ubuntu-latest-8-cores"] - python-version: ["3.11"] - steps: - - uses: actions/checkout@v4 - - uses: conda-incubator/setup-miniconda@v2 - with: - auto-update-conda: true - python-version: ${{ matrix.python-version }} - channels: conda-forge,defaults - miniforge-variant: Mambaforge - use-mamba: true - channel-priority: strict - show-channel-urls: true - - - name: configure conda and install requirements - shell: bash -l {0} - run: | - mamba config --set always_yes yes - mamba install --quiet --file=requirements.txt - mamba install --quiet --file=test-requirements.txt - - - name: install rubin_sim - shell: bash -l {0} - run: | - echo `pwd` - ls ${{ github.workspace }} - python -m pip install . 
- - - name: download rubin_sim_data components needed for unit tests - shell: bash -l {0} - run: | - export RUBIN_SIM_DATA_DIR=${{ github.workspace }}/data_dir - rs_download_data --force --tdqm_disable - - - name: conda list - shell: bash -l {0} - run: conda list - - - name: run tests - shell: bash -l {0} - run: | - export RUBIN_SIM_DATA_DIR=${{ github.workspace }}/data_dir - pytest -r a -v --cov=rubin_sim --cov=tests --cov-report=xml --cov-report=term --cov-branch - - - name: Upload coverage to codecov - uses: codecov/codecov-action@v2 - with: - file: coverage.xml \ No newline at end of file diff --git a/rubin_sim/maf/maf_contrib/lv_dwarfs/lv_dwarfs_metrics.py b/rubin_sim/maf/maf_contrib/lv_dwarfs/lv_dwarfs_metrics.py index eac8c63cf..8cf8f3e46 100644 --- a/rubin_sim/maf/maf_contrib/lv_dwarfs/lv_dwarfs_metrics.py +++ b/rubin_sim/maf/maf_contrib/lv_dwarfs/lv_dwarfs_metrics.py @@ -343,7 +343,7 @@ def run(self, data_slice, slice_point=None): if ngal_sqarcmin < 0 or nstar_sqarcmin < 0: print( f"Here be a problem - ngals_sqarcmin {ngal_sqarcmin} or nstar_sqarcmin {nstar_sqarcmin} " - f'are negative. depths: {g5}, {i5}. ' + f"are negative. depths: {g5}, {i5}. " f'{slice_point["ra"], slice_point["dec"], slice_point["sid"]}' ) # The number of stars required to reach nsigma is diff --git a/rubin_sim/maf/maf_contrib/star_counts/coords.py b/rubin_sim/maf/maf_contrib/star_counts/coords.py index 0a7c040c9..f20a36794 100644 --- a/rubin_sim/maf/maf_contrib/star_counts/coords.py +++ b/rubin_sim/maf/maf_contrib/star_counts/coords.py @@ -95,4 +95,3 @@ def gal_cyn(b_deg, l_deg, dist): R = np.power(x_new**2 + y**2, 0.5) rho = np.arctan(y / x) return R, rho, Z - diff --git a/rubin_sim/maf/maf_contrib/star_counts/starcount.py b/rubin_sim/maf/maf_contrib/star_counts/starcount.py index 787ccfbf9..13cc0b4ba 100644 --- a/rubin_sim/maf/maf_contrib/star_counts/starcount.py +++ b/rubin_sim/maf/maf_contrib/star_counts/starcount.py @@ -28,4 +28,3 @@ def starcount(eq_ra, eq_dec, d1, d2): densities = [stellardensity.stellardensity(x[0], x[2]) for x in positions] totalcount = np.sum(np.asarray(volumes) * np.asarray(densities)) return totalcount - diff --git a/rubin_sim/maf/metrics/calibration_metrics.py b/rubin_sim/maf/metrics/calibration_metrics.py index 498f447de..698b22f67 100644 --- a/rubin_sim/maf/metrics/calibration_metrics.py +++ b/rubin_sim/maf/metrics/calibration_metrics.py @@ -99,9 +99,7 @@ def __init__( ) self.comment += "divided by the minimum parallax uncertainty possible " self.comment += "(if all visits were six months apart). " - self.comment += "Values closer to 1 indicate more optimal "\ - "scheduling for parallax measurement." - + self.comment += "Values closer to 1 indicate more optimal " "scheduling for parallax measurement." def _final_sigma(self, position_errors, ra_pi_amp, dec_pi_amp): """Assume parallax in RA and DEC are fit independently, then combined. diff --git a/rubin_sim/maf/metrics/phase_gap_metric.py b/rubin_sim/maf/metrics/phase_gap_metric.py index 8ec0142f1..be8479ab7 100644 --- a/rubin_sim/maf/metrics/phase_gap_metric.py +++ b/rubin_sim/maf/metrics/phase_gap_metric.py @@ -99,8 +99,8 @@ def reduce_largest_gap(self, metric_val): # To fit a periodic source well, you need to cover the full phase, # and fit the amplitude. class PeriodicQualityMetric(BaseMetric): - """Evaluate phase coverage over a given period. 
- """ + """Evaluate phase coverage over a given period.""" + def __init__( self, mjd_col="observationStartMJD", @@ -119,8 +119,7 @@ def __init__( ) def _calc_phase(self, data_slice): - """1 is perfectly balanced phase coverage, 0 is no effective coverage. - """ + """1 is perfectly balanced phase coverage, 0 is no effective coverage.""" angles = data_slice[self.mjd_col] % self.period angles = angles / self.period * 2.0 * np.pi x = np.cos(angles) diff --git a/rubin_sim/maf/plots/spatial_plotters.py b/rubin_sim/maf/plots/spatial_plotters.py index 02fae6b07..504b47473 100644 --- a/rubin_sim/maf/plots/spatial_plotters.py +++ b/rubin_sim/maf/plots/spatial_plotters.py @@ -388,8 +388,7 @@ def __init__(self): self.base_hist = BaseHistogram() def __call__(self, metric_value, slicer, user_plot_dict, fignum=None): - """Histogram metric_value for all healpix points. - """ + """Histogram metric_value for all healpix points.""" if "Healpix" not in slicer.slicer_name: raise ValueError("HealpixHistogram is for use with healpix slicer.") plot_dict = {} From 7ac0015f16e8590f6b12bfdbf903c75f529341c1 Mon Sep 17 00:00:00 2001 From: Lynne Jones Date: Tue, 27 Feb 2024 10:47:27 -0800 Subject: [PATCH 22/26] update workflows --- .github/workflows/ruff.yaml | 4 ++-- .github/workflows/test_and_build.yaml | 7 +++++-- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ruff.yaml b/.github/workflows/ruff.yaml index b7e2b3e68..d30a50372 100644 --- a/.github/workflows/ruff.yaml +++ b/.github/workflows/ruff.yaml @@ -13,12 +13,12 @@ jobs: isort: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: isort/isort-action@v1 with: requirements-files: "requirements.txt test-requirements.txt" ruff: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: chartboost/ruff-action@v1 \ No newline at end of file diff --git a/.github/workflows/test_and_build.yaml b/.github/workflows/test_and_build.yaml index 1e097b784..391e47bce 100644 --- a/.github/workflows/test_and_build.yaml +++ b/.github/workflows/test_and_build.yaml @@ -45,7 +45,7 @@ jobs: run: | echo `pwd` ls ${{ github.workspace }} - python -m pip install . --no-deps + python -m pip install -e . --no-deps - name: Access rubin-sim-data cache id: cache-rs @@ -70,11 +70,14 @@ jobs: shell: bash -l {0} run: conda list + - name: run black + shell: bash -l {0} + run: black --check + - name: run tests shell: bash -l {0} run: | export RUBIN_SIM_DATA_DIR=~/rubin_sim_data - #pytest -r a -v pytest -r a -v --cov=rubin_sim --cov=tests --cov-report=xml --cov-report=term --cov-branch - name: Upload coverage to codecov From 7c572b209955328eac097aab2f1a3a8e4f2d1547 Mon Sep 17 00:00:00 2001 From: Lynne Jones Date: Sun, 3 Mar 2024 23:33:08 -0800 Subject: [PATCH 23/26] Further docstring updates. 
--- ...calculate_lsst_field_visibility_astropy.py | 4 +- .../maf/maf_contrib/grb_transient_metric.py | 4 +- .../galaxy_counts_metric_extended.py | 40 +- .../galaxy_counts_with_pixel_calibration.py | 3 +- .../maf_contrib/transient_ascii_sed_metric.py | 2 +- rubin_sim/maf/maf_contrib/xrb_metrics.py | 18 +- rubin_sim/maf/maps/create_gaia_density_map.py | 3 - rubin_sim/maf/maps/dust_map_3d.py | 69 +++- rubin_sim/maf/maps/trilegal_map.py | 16 +- rubin_sim/maf/metric_bundles/metric_bundle.py | 45 +- .../maf/metric_bundles/metric_bundle_group.py | 85 ++-- .../maf/metric_bundles/mo_metric_bundle.py | 141 ++++++- rubin_sim/maf/metrics/mo_metrics.py | 389 +++++++++++++----- rubin_sim/maf/metrics/pair_metric.py | 18 +- .../maf/metrics/periodic_detect_metric.py | 20 +- rubin_sim/maf/metrics/phase_gap_metric.py | 26 +- rubin_sim/maf/metrics/sn_n_sn_metric.py | 139 ++++--- rubin_sim/maf/metrics/sn_sl_metric.py | 98 +++-- rubin_sim/maf/metrics/sn_snr_metric.py | 5 +- rubin_sim/maf/metrics/string_count_metric.py | 29 +- rubin_sim/moving_objects/base_obs.py | 2 +- rubin_sim/moving_objects/cheby_fits.py | 50 +-- rubin_sim/moving_objects/ooephemerides.py | 136 +++--- .../phot_utils/photometric_parameters.py | 10 +- rubin_sim/selfcal/generate_catalog.py | 25 +- rubin_sim/selfcal/solver.py | 20 +- 26 files changed, 896 insertions(+), 501 deletions(-) diff --git a/rubin_sim/maf/maf_contrib/calculate_lsst_field_visibility_astropy.py b/rubin_sim/maf/maf_contrib/calculate_lsst_field_visibility_astropy.py index 9edd457f7..3ceae830a 100644 --- a/rubin_sim/maf/maf_contrib/calculate_lsst_field_visibility_astropy.py +++ b/rubin_sim/maf/maf_contrib/calculate_lsst_field_visibility_astropy.py @@ -25,7 +25,7 @@ def calculate_lsst_field_visibility( ---------- ra : `float` RA in decimal degrees. - dec : `float + dec : `float` Declination in decimal degrees start_date : `astropy.time.Time` Start date for calculations @@ -125,7 +125,7 @@ def calculate_lsst_field_visibility_fast( ---------- ra : `float` RA in decimal degrees. - dec : `float + dec : `float` Declination in decimal degrees start_date : `astropy.time.Time` Start date for calculations diff --git a/rubin_sim/maf/maf_contrib/grb_transient_metric.py b/rubin_sim/maf/maf_contrib/grb_transient_metric.py index 564858aee..85b00cdaa 100644 --- a/rubin_sim/maf/maf_contrib/grb_transient_metric.py +++ b/rubin_sim/maf/maf_contrib/grb_transient_metric.py @@ -10,7 +10,9 @@ class GRBTransientMetric(metrics.BaseMetric): - """Detections for on-axis GRB afterglows decaying as + """Evaluate the likelihood of detecting a GRB optical counterpart. + + Detections for an on-axis GRB afterglows decaying as F(t) = F(1min)((t-t0)/1min)^-alpha. No jet break, for now. Derived from TransientMetric, but calculated with reduce functions to diff --git a/rubin_sim/maf/maf_contrib/lss_obs_strategy/galaxy_counts_metric_extended.py b/rubin_sim/maf/maf_contrib/lss_obs_strategy/galaxy_counts_metric_extended.py index 9b0c1ffe9..2dc5d08bd 100644 --- a/rubin_sim/maf/maf_contrib/lss_obs_strategy/galaxy_counts_metric_extended.py +++ b/rubin_sim/maf/maf_contrib/lss_obs_strategy/galaxy_counts_metric_extended.py @@ -19,6 +19,7 @@ __all__ = ("GalaxyCountsMetricExtended",) +import warnings import numpy as np import scipy @@ -31,12 +32,10 @@ class GalaxyCountsMetricExtended(BaseMetric): - """ + """Estimate galaxy counts per HEALpix pixel. - Estimate galaxy counts per HEALpix pixel. Accommodates for dust extinction, magnitude cuts, and specification of the galaxy LF to specific redshift bin to consider. 
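The band corrections introduced for _gal_count in the hunk below shift each band onto the i-band scale under fixed colors. A small sketch of that arithmetic, mirroring the dictionary added by this patch::

    # With assumed colors (u-g)=(g-r)=(r-i)=(i-z)=(z-y)=0.4, each band
    # maps onto i band by a fixed offset.
    factor = 0.4
    band_correction = {
        "u": -3.0 * factor,  # i = u - 1.2
        "g": -2.0 * factor,  # i = g - 0.8
        "r": -1.0 * factor,  # i = r - 0.4
        "i": 0.0,
        "z": 1.0 * factor,   # i = z + 0.4
        "y": 2.0 * factor,   # i = y + 0.8
    }
    # e.g. an r-band magnitude of 24.0 corresponds to i of about 23.6:
    print(24.0 + band_correction["r"])  # -> 23.6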
- Dependency (aside from MAF): constantsForPipeline.py Parameters @@ -126,31 +125,22 @@ def __init__( def _gal_count(self, apparent_mag, coaddm5): # calculate the change in the power law constant based on the band # colors assumed here: (u-g)=(g-r)=(r-i)=(i-z)= (z-y)=0.4 - if self.filter_band == "u": - # dimmer than i: u-g= 0.4 => g= u-0.4 => i= u-0.4*3 - band_correction = -0.4 * 3.0 - elif self.filter_band == "g": - # dimmer than i: g-r= 0.4 => r= g-0.4 => i= g-0.4*2 - band_correction = -0.4 * 2.0 - elif self.filter_band == "r": - # dimmer than i: i= r-0.4 - band_correction = -0.4 - elif self.filter_band == "i": - # i - band_correction = 0.0 - elif self.filter_band == "z": - # brighter than i: i-z= 0.4 => i= z+0.4 - band_correction = 0.4 - elif self.filter_band == "y": - # brighter than i: z-y= 0.4 => z= y+0.4 => i= y+0.4*2 - band_correction = 0.4 * 2.0 - else: - print("ERROR: Invalid band in GalaxyCountsMetricExtended. Assuming i-band.") - band_correction = 0 + factor = 0.4 + band_correction_dict = {'u': -3.0 * factor, + 'g': -2.0 * factor, + 'r': -1.0 * factor, + 'i': 0.0, + 'z': factor, + 'y': 2.0 * factor} + if self.filter_band not in band_correction_dict: + warnings.warn("Invalid band in GalaxyCountsMetricExtended. " + "Assuming i-band instead.") + band_correction = band_correction_dict.get(self.filter_band, 0.0) # check to make sure that the z-bin assigned is valid. if (self.redshift_bin != "all") and (self.redshift_bin not in list(self.power_law_const_a.keys())): - print("ERROR: Invalid redshift bin in GalaxyCountsMetricExtended. Defaulting to all redshifts.") + warnings.warn("Invalid redshift bin in GalaxyCountsMetricExtended. " + "Defaulting to all redshifts.") self.redshift_bin = "all" # consider the power laws diff --git a/rubin_sim/maf/maf_contrib/lss_obs_strategy/galaxy_counts_with_pixel_calibration.py b/rubin_sim/maf/maf_contrib/lss_obs_strategy/galaxy_counts_with_pixel_calibration.py index f821259dd..31eafcff1 100644 --- a/rubin_sim/maf/maf_contrib/lss_obs_strategy/galaxy_counts_with_pixel_calibration.py +++ b/rubin_sim/maf/maf_contrib/lss_obs_strategy/galaxy_counts_with_pixel_calibration.py @@ -37,8 +37,7 @@ def galaxy_counts_with_pixel_calibration( cfhtls_counts=False, normalized_mock_catalog_counts=True, ): - """ - Estimate galaxy counts for a given HEALpix pixel directly + """Estimate galaxy counts for a given HEALpix pixel directly (without a slicer). Parameters diff --git a/rubin_sim/maf/maf_contrib/transient_ascii_sed_metric.py b/rubin_sim/maf/maf_contrib/transient_ascii_sed_metric.py index 39ae1fcf2..2d69e818d 100644 --- a/rubin_sim/maf/maf_contrib/transient_ascii_sed_metric.py +++ b/rubin_sim/maf/maf_contrib/transient_ascii_sed_metric.py @@ -19,7 +19,7 @@ from sncosmo import Model, TimeSeriesSource, read_griddata_ascii except ImportError: pass -from astropy.cosmology import Planck15 as cosmo # # noqa N813 +from astropy.cosmology import Planck15 as cosmo # noqa N813 from rubin_sim.maf.metrics import BaseMetric from rubin_sim.maf.utils import m52snr diff --git a/rubin_sim/maf/maf_contrib/xrb_metrics.py b/rubin_sim/maf/maf_contrib/xrb_metrics.py index 3db9796e7..80428497f 100644 --- a/rubin_sim/maf/maf_contrib/xrb_metrics.py +++ b/rubin_sim/maf/maf_contrib/xrb_metrics.py @@ -270,15 +270,15 @@ class XRBPopMetric(BaseMetric): or if it is detected pts_early times within t_early days of the start of the outburst. - Parameters - ---------- - pts_needed : `int`, opt - Minimum number of detections, for simple `detected` option. - mjd0 : `float`, opt - Start of survey. 
-    output_lc : `bool`, opt
-        If True, output lightcurve points.
-        If False, just return metric values.
+    Parameters
+    ----------
+    pts_needed : `int`, opt
+        Minimum number of detections, for simple `detected` option.
+    mjd0 : `float`, opt
+        Start of survey.
+    output_lc : `bool`, opt
+        If True, output lightcurve points.
+        If False, just return metric values.
     """

     def __init__(
diff --git a/rubin_sim/maf/maps/create_gaia_density_map.py b/rubin_sim/maf/maps/create_gaia_density_map.py
index bdb2cf1b1..c11fc25b1 100755
--- a/rubin_sim/maf/maps/create_gaia_density_map.py
+++ b/rubin_sim/maf/maps/create_gaia_density_map.py
@@ -115,9 +115,6 @@
             distances = angular_separation(result["ra"], result["dec"], ra[i], dec[i])
             result = result[np.where(distances < radius)]

-            import pdb
-
-            pdb.set_trace()
             # I could think of setting the chunksize to something really large,
             # then only doing one chunk?
             # Or maybe setting up a way to break out of the loop if
diff --git a/rubin_sim/maf/maps/dust_map_3d.py b/rubin_sim/maf/maps/dust_map_3d.py
index 74e489284..894f013d3 100644
--- a/rubin_sim/maf/maps/dust_map_3d.py
+++ b/rubin_sim/maf/maps/dust_map_3d.py
@@ -13,27 +13,9 @@
 class DustMap3D(BaseMap):
     """Add 3-d E(B-V) values to the slice points.

-    The slice point dictionary keys are expanded with the following keys:
-    ebv3d_dists -
-        the distances from the 3d dust map at each slice_point (in pc)
-    `ebv3d_ebvs` -
-        the E(B-V) values corresponding to each distance at each slice_point
-    `ebv3d_ebv_at_<dist_pc>` -
-        the (single) ebv value at the nearest distance to dist_pc
-    `ebv3d_dist_at_<d_mag>` -
-        the (single) distance value corresponding to where extinction and
-        distance modulus combine to create a m-Mo value of d_mag, for the filter
-        specified in filtername (in pc).
-        Note that <dist_pc> and <d_mag> will be formatted with a
-        single decimal place.
-
-    The additional method 'distance_at_mag' can be called either with the
-    distances and ebv values for the entire map or with the values from a
-    single slice_point, in order to calculate the distance at which
-    extinction and distance modulus combine to create a m-Mo value closest
-    to 'dmag' in any filter. This is the same value as would be reported in
-    ebv3d_dist_at_<d_mag>, but can be calculated on the fly,
-    allowing variable filters and dmag values.
+    See "notes" below for a discussion of the content of the map keys,
+    and functionality that can be accessed by calling
+    `DustMap3d.distance_at_mag` with the key values at a given slice point.

     Parameters
     ----------
@@ -57,6 +39,30 @@ class DustMap3D(BaseMap):
     r_x : `dict` {`str`: `float`}, opt
         Per-filter dust extinction curve coefficients.
         Calculated by rubin_sim.photUtils.DustValues if "None".
+
+    Notes
+    -----
+    The slice point dictionary keys are expanded with the following keys:
+    ebv3d_dists -
+        the distances from the 3d dust map at each slice_point (in pc)
+    `ebv3d_ebvs` -
+        the E(B-V) values corresponding to each distance at each slice_point
+    `ebv3d_ebv_at_<dist_pc>` -
+        the (single) ebv value at the nearest distance to dist_pc
+    `ebv3d_dist_at_<d_mag>` -
+        the (single) distance value corresponding to where extinction and
+        distance modulus combine to create a m-Mo value of d_mag, for the filter
+        specified in filtername (in pc).
+        Note that <dist_pc> and <d_mag> will be formatted with a
+        single decimal place.
+
+    The additional method 'distance_at_mag' can be called either with the
+    distances and ebv values for the entire map or with the values from a
+    single slice_point, in order to calculate the distance at which
+    extinction and distance modulus combine to create a m-Mo value closest
+    to 'dmag' in any filter. This is the same value as would be reported in
+    ebv3d_dist_at_<d_mag>, but can be calculated on the fly,
+    allowing variable filters and dmag values.
     """

     def __init__(
@@ -133,6 +139,27 @@ def run(self, slice_points):
         return slice_points

     def distance_at_dmag(self, dmag, dists, ebvs, filtername=None):
+        """Calculate the distance at which a given change of magnitude would
+        occur (including distance modulus and dust extinction).
+
+        Parameters
+        ----------
+        dmag : `float`
+            The magnitude change expected.
+        dists : `np.ndarray`, (N,)
+            The distances corresponding to the ebv values.
+        ebvs : `np.ndarray`, (N,)
+            The ebv values at each distance.
+        filtername : `str` or None
+            The filter in which to evaluate the magnitude change.
+            If None, uses the default filter for the map.
+            The filter translates ebv into magnitudes of extinction.
+
+        Returns
+        -------
+        dist_dmag : `float`
+            The distance at which the specified dmag occurs.
+        """
         # Provide this as a method which could be used for a single
         # slice_point as well as for whole map
         # (single slice_point means you could calculate this for any
diff --git a/rubin_sim/maf/maps/trilegal_map.py b/rubin_sim/maf/maps/trilegal_map.py
index ab2c7dd37..b6bf47cd5 100644
--- a/rubin_sim/maf/maps/trilegal_map.py
+++ b/rubin_sim/maf/maps/trilegal_map.py
@@ -16,13 +16,6 @@ class TrilegalDensityMap(BaseMap):
     """Read and hold the cumulative stellar luminosity function for
     each slice point.

-    The underlying stellar luminosity function map is available in a
-    variety of nsides, and contains
-    stars per sq degree at a series of magnitudes (the map contains
-    `starLumFunc_<filtername>` and `starMapBins_<filtername>`).
-    For slice points which do not match one of the native nside options,
-    the map uses the nearest healpix point on the specified nside grid.
-
     The stellar luminosity function comes from the TRILEGAL model.

     Parameters
@@ -33,6 +26,15 @@ class TrilegalDensityMap(BaseMap):
         The HEALpix nside (can be 64 or 128). Default 64.
     ext : `bool`, opt
         Use the full sky maps. Default True.
+
+    Notes
+    -----
+    The underlying stellar luminosity function map is available in a
+    variety of nsides, and contains
+    stars per sq degree at a series of magnitudes (the map contains
+    `starLumFunc_<filtername>` and `starMapBins_<filtername>`).
+    For slice points which do not match one of the native nside options,
+    the map uses the nearest healpix point on the specified nside grid.
     """

     def __init__(self, filtername="r", nside=64, ext=True):
diff --git a/rubin_sim/maf/metric_bundles/metric_bundle.py b/rubin_sim/maf/metric_bundles/metric_bundle.py
index fdf588d06..f97f55621 100644
--- a/rubin_sim/maf/metric_bundles/metric_bundle.py
+++ b/rubin_sim/maf/metric_bundles/metric_bundle.py
@@ -29,26 +29,7 @@ def create_empty_metric_bundle():


 class MetricBundle:
-    """Define the "thing" you are measuring, with a combination of
-    * metric (calculated per data_slice)
-    * slicer (how to create the data_slices)
-    * constraint (an optional definition of a large subset of data)
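An illustrative sketch (not the DustMap3D implementation) of the m-Mo search that distance_at_dmag performs, assuming a nominal r-band r_x of about 2.3 and made-up distance and E(B-V) arrays::

    import numpy as np

    dists = np.array([100.0, 500.0, 1000.0, 3000.0])  # distances, pc
    ebvs = np.array([0.01, 0.05, 0.12, 0.30])         # cumulative E(B-V)
    r_x = 2.3                                         # nominal r-band coefficient
    dmag = 12.0                                       # target m-Mo value

    # Combine distance modulus and dust extinction at each distance, then
    # pick the distance whose m-Mo value lies closest to dmag.
    dmags = 5.0 * np.log10(dists / 10.0) + r_x * ebvs
    closest = np.argmin(np.abs(dmags - dmag))
    print(dists[closest])  # -> 3000.0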
- - After the metric is evaluated at each slice_point created by the - slicer, the resulting metric values are saved in the MetricBundle. - - The MetricBundle also saves the summary metrics to be used - to generate summary statistics over those metric values, - as well as the resulting summary statistic values. - - Plotting parameters and display parameters (for show_maf) are saved - in the MetricBundle, as well as additional info_label such as the - opsim run name, and relevant stackers and maps - to apply when calculating the metric values. + """Define a metric bundle combination of metric, slicer, and constraint. Parameters ---------- @@ -98,6 +79,30 @@ class MetricBundle: A list of pre-configured maps to use for the metric. This will be auto-generated if specified by the metric class, but pre-configured versions will override these. + + Notes + ----- + Define the "thing" you are measuring, with a combination of + * metric (calculated per data_slice) + * slicer (how to create the data_slices) + * constraint (an optional definition of a large subset of data) + + Together these define a unique combination of an opsim benchmark, + or "metric bundle". + An example would be: + a CountMetric, a HealpixSlicer, and a constraint of 'filter="r"'. + + After the metric is evaluated at each slice_point created by the + slicer, the resulting metric values are saved in the MetricBundle. + + The MetricBundle also saves the summary metrics to be used + to generate summary statistics over those metric values, + as well as the resulting summary statistic values. + + Plotting parameters and display parameters (for show_maf) are saved + in the MetricBundle, as well as additional info_label such as the + opsim run name, and relevant stackers and maps + to apply when calculating the metric values. """ col_info = ColInfo() diff --git a/rubin_sim/maf/metric_bundles/metric_bundle_group.py b/rubin_sim/maf/metric_bundles/metric_bundle_group.py index a22923d0b..64908f893 100644 --- a/rubin_sim/maf/metric_bundles/metric_bundle_group.py +++ b/rubin_sim/maf/metric_bundles/metric_bundle_group.py @@ -19,19 +19,20 @@ def make_bundles_dict_from_list(bundle_list): """Utility to convert a list of MetricBundles into a dictionary, - keyed by the fileRoot names. + keyed by the file_root names. - Raises an exception if the fileroot duplicates another metricBundle. + Raises an exception if the file_root duplicates another metricBundle. (Note this should alert to potential cases of filename duplication). Parameters ---------- bundle_list : `list` [`MetricBundles`] + List of metric bundles to convert into a dict. """ b_dict = {} for b in bundle_list: if b.file_root in b_dict: - raise NameError("More than one metricBundle is using the same fileroot, %s" % (b.file_root)) + raise NameError("More than one metric_bundle is using the same file_root, %s" % (b.file_root)) b_dict[b.file_root] = b return b_dict @@ -39,21 +40,6 @@ def make_bundles_dict_from_list(bundle_list): class MetricBundleGroup: """Calculate all values for a group of MetricBundles. - The MetricBundleGroup will query data from a single database table - (for multiple constraints), use that data to calculate metric values - for multiple slicers, and calculate summary statistics and - generate plots for all metrics included in - the dictionary passed to the MetricBundleGroup. 
- - We calculate the metric values here, rather than in the - individual MetricBundles, because it is much more efficient to step - through a slicer once (and calculate all the relevant metric values - at each point) than it is to repeat this process multiple times. - - The MetricBundleGroup also determines how to efficiently group - the MetricBundles to reduce the number of sql queries of the database, - grabbing larger chunks of data at once. - Parameters ---------- bundle_dict : `dict` or `list` [`MetricBundles`] @@ -88,6 +74,23 @@ class MetricBundleGroup: db_table : `str`, opt The name of the table in the db_obj to query for data. For modern opsim outputs, this table is `observations` (default None). + + Notes + ----- + The MetricBundleGroup will query data from a single database table + (for multiple constraints), use that data to calculate metric values + for multiple slicers, and calculate summary statistics and + generate plots for all metrics included in + the dictionary passed to the MetricBundleGroup. + + We calculate the metric values here, rather than in the + individual MetricBundles, because it is much more efficient to step + through a slicer once (and calculate all the relevant metric values + at each point) than it is to repeat this process multiple times. + + The MetricBundleGroup also determines how to efficiently group + the MetricBundles to reduce the number of sql queries of the database, + grabbing larger chunks of data at once. """ def __init__( @@ -141,12 +144,6 @@ def __init__( def _check_compatible(self, metric_bundle1, metric_bundle2): """Check if two MetricBundles are "compatible". - Compatible indicates that the sql constraints, the slicers, - and the maps are the same, and - that the stackers do not interfere with each other - (i.e. are not trying to set the same column in different ways). - Returns True if the MetricBundles are compatible, False if not. - Parameters ---------- metric_bundle1 : `MetricBundle` @@ -155,6 +152,14 @@ def _check_compatible(self, metric_bundle1, metric_bundle2): Returns ------- match : `bool` + + Notes + ----- + Compatible indicates that the sql constraints, the slicers, + and the maps are the same, and + that the stackers do not interfere with each other + (i.e. are not trying to set the same column in different ways). + Returns True if the MetricBundles are compatible, False if not. """ if metric_bundle1.constraint != metric_bundle2.constraint: return False @@ -209,11 +214,8 @@ def _find_compatible_lists(self): self.compatible_lists = compatible_lists def run_all(self, clear_memory=False, plot_now=False, plot_kwargs=None): - """Runs all the metricBundles in the metricBundleGroup, - over all constraints. - - Calculates metric values, then runs reduce functions and summary - statistics for all MetricBundles. + """Calculates metric values, then runs reduce functions and summary + statistics for all MetricBundles, over all constraints. Parameters ---------- @@ -246,6 +248,11 @@ def set_current(self, constraint): constraint will be included in a subset identified as the currentBundleDict. These are the active metrics to be calculated and plotted, etc. + + Notes + ----- + This is useful, for the context of running only a specific set + of metric bundles so that the user can provide `sim_data` directly. """ if constraint is None: constraint = "" @@ -267,10 +274,7 @@ def run_current( plot_now=False, plot_kwargs=None, ): - """Run all the metricBundles which match this constraint in the - metricBundleGroup. 
- - Calculates the metric values, then runs reduce functions and + """Calculates the metric values, then runs reduce functions and summary statistics for metrics in the current set only (see self.setCurrent). @@ -278,18 +282,23 @@ def run_current( ---------- constraint : `str` constraint to use to set the currently active metrics - sim_data : `np.ndarray`, ops + sim_data : `np.ndarray`, opt If simData is not None, then this numpy structured array is used instead of querying data from the dbObj. - clear_memory : `bool`, ops + clear_memory : `bool`, opt If True, metric values are deleted from memory after they are calculated (and saved to disk). - plot_now : `bool`, ops + plot_now : `bool`, opt Plot immediately after calculating metric values (instead of the usual procedure, which is to plot after metric values are calculated for all constraints). - plot_kwargs : kwargs, ops + plot_kwargs : kwargs, opt Plotting kwargs to pass to plotCurrent. + + Notes + ----- + This is useful, for the context of running only a specific set + of metric bundles so that the user can provide `sim_data` directly. """ self.set_current(constraint) @@ -390,7 +399,7 @@ def get_data(self, constraint): print("Found %i visits" % (self.sim_data.size)) def _run_compatible(self, compatible_list): - """Runs a set of 'compatible' metricbundles in the MetricBundleGroup + """Runs a set of 'compatible' metric_bundles in the MetricBundleGroup dictionary identified by 'compatible_list' keys. A compatible list of MetricBundles is a subset of the diff --git a/rubin_sim/maf/metric_bundles/mo_metric_bundle.py b/rubin_sim/maf/metric_bundles/mo_metric_bundle.py index 17f9c8084..59a993cb9 100644 --- a/rubin_sim/maf/metric_bundles/mo_metric_bundle.py +++ b/rubin_sim/maf/metric_bundles/mo_metric_bundle.py @@ -25,7 +25,7 @@ def create_empty_mo_metric_bundle(): Returns ------- - ~rubin_sim.maf.metricBundles.MoMetricBundle + MoMetricBundle : `~rubin_sim.maf.metricBundles.MoMetricBundle` An empty metric bundle, configured with just the :class:`BaseMetric` and :class:`BaseSlicer`. """ @@ -33,16 +33,12 @@ def create_empty_mo_metric_bundle(): def make_completeness_bundle(bundle, completeness_metric, h_mark=None, results_db=None): - """Make a mock metric bundle from a bundle which had - MoCompleteness or MoCumulativeCompleteness summary - metrics run. This lets us use the plotHandler + plots.MetricVsH - to generate plots. - Will also work with completeness metric run in order to calculate - fraction of the population, or with MoCompletenessAtTime metric. + """Evaluate a MoMetricBundle with a completeness-style metric, and + downsample into a new MoMetricBundle marginalized over the population. Parameters ---------- - bundle : `~rubin_sim.maf.metricBundles.MetricBundle` + bundle : `~rubin_sim.maf.metricBundles.MoMetricBundle` The metric bundle with a completeness summary statistic. completeness_metric : `~rubin_sim.maf.metric` The summary (completeness) metric to run on the bundle. @@ -56,6 +52,19 @@ def make_completeness_bundle(bundle, completeness_metric, h_mark=None, results_d Returns ------- mo_metric_bundle : `~rubin_sim.maf.metricBundles.MoMetricBundle` + + Notes + ----- + This utility turns a metric bundle which could evaluate a metric over + the population, into a secondary or mock metric bundle, using either + MoCompleteness or MoCumulativeCompleteness summary + metrics to marginalize over the population of moving objects. 
+ This lets us use the plotHandler + plots.MetricVsH + to generate plots across the population, using the completeness + information. + This utility will also work with completeness metric run in order + to calculate fraction of the population, + or with MoCompletenessAtTime metric. """ bundle.set_summary_metrics(completeness_metric) # This step adds summary values at each point to the original metric - @@ -99,6 +108,70 @@ def make_completeness_bundle(bundle, completeness_metric, h_mark=None, results_d class MoMetricBundle(MetricBundle): + """Define a moving object metric bundle combination of + moving-object metric, moving-object slicer, and constraint. + + Parameters + ---------- + metric : `~rubin_sim.maf.metric` + The Metric class to run per slice_point + slicer : `~rubin_sim.maf.slicer` + The Slicer to apply to the incoming visit data (the observations). + constraint : `str` or None, opt + A (sql-style) constraint to apply to the visit data, to apply a + broad sub-selection. + stacker_list : `list` [`~rubin_sim.maf.stacker`], opt + A list of pre-configured stackers to use to generate additional + columns per visit. + These will be generated automatically if needed, but pre-configured + versions will override these. + run_name : `str`, opt + The name of the simulation being run. + This will be added to output files and plots. + Setting it prevents file conflicts when running the same + metric on multiple simulations, and + provides a way to identify which simulation is being analyzed. + info_label : `str` or None, opt + Information to add to the output metric data file name and plot labels. + If this is not provided, it will be auto-generated from the + constraint (if any). + Setting this provides an easy way to specify different + configurations of a metric, a slicer, + or just to rewrite your constraint into friendlier terms. + (i.e. a constraint like 'note not like "%DD%"' can become + "non-DD" in the file name and plot labels + by specifying info_label). + plot_dict : `dict` of plotting parameters, opt + Specify general plotting parameters, such as x/y/color limits. + display_dict : `dict` of display parameters, opt + Specify parameters for show_maf web pages, such as the + side bar labels and figure captions. + Keys: 'group', 'subgroup', 'caption', and 'order' + (such as to set metrics in filter order, etc) + child_metrics : `list` of `~rubin_sim.maf.metrics` + A list of child metrics to run to summarize the + primary metric, such as Discovery_At_Time, etc. + summary_metrics : `list` of `~rubin_sim.maf.metrics` + A list of summary metrics to run to summarize the + primary or child metric, such as CompletenessAtH, etc. + + Notes + ----- + Define the "thing" you are measuring, with a combination of + * metric (calculated per object) + * slicer (contains information on the moving objects + and their observations) + * constraint (an optional definition of a large subset of data) + + The MoMetricBundle also saves the child metrics to be used + to generate summary statistics over those metric values, + as well as the resulting summary statistic values. + + Plotting parameters and display parameters (for show_maf) are saved + in the MoMetricBundle, as well as additional info_label such as the + opsim run name, and relevant stackers and maps + to apply when calculating the metric values. 
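A hedged usage sketch following the parameters described above; the metric and slicer classes shown are illustrative choices, and loading of the orbits and observations into the slicer is omitted::

    import rubin_sim.maf as maf

    # Slicer holding the moving objects and their observations.
    slicer = maf.MoObjSlicer()
    metric = maf.NObsMetric()  # an example moving-object metric
    bundle = maf.MoMetricBundle(
        metric,
        slicer,
        constraint=None,
        run_name="example_run",
        info_label="all objects",
    )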
+ """ def __init__( self, metric, @@ -208,11 +281,18 @@ def _find_req_cols(self): raise NotImplementedError def set_child_bundles(self, child_metrics=None): - """ - Identify any child metrics to be run on this (parent) bundle. + """Identify any child metrics to be run on this (parent) bundle. and create the new metric bundles that will hold the child values, linking to this bundle. Remove the summaryMetrics from self afterwards. + + Parameters + ---------- + child_metrics : `~maf.MoMetric` + Child metrics work like reduce functions for non-moving objects. + They pull out subsets of the original metric values, typically + do more processing on those values, and then save them in + new metric bundles. """ self.child_bundles = {} if child_metrics is None: @@ -235,9 +315,13 @@ def set_child_bundles(self, child_metrics=None): self.summary_metrics = [] def compute_summary_stats(self, results_db=None): - """ - Compute summary statistics on metric_values, using summaryMetrics, + """Compute summary statistics on metric_values, using summaryMetrics, for self and child bundles. + + Parameters + ---------- + results_db : `~maf.ResultsDb` + Database which holds the summary statistic information. """ if self.summary_values is None: self.summary_values = {} @@ -266,6 +350,29 @@ def reduce_metric(self, reduce_func, reduce_plot_dict=None, reduce_display_dict= class MoMetricBundleGroup: + """Run groups of MoMetricBundles. + + Parameters + ---------- + bundle_dict : `dict` or `list` [`MoMetricBundles`] + Individual MoMetricBundles should be placed into a dictionary, + and then passed to the MoMetricBundleGroup. + The dictionary keys can then be used to identify MoMetricBundles + if needed -- and to identify new MetricBundles which could be + created if 'reduce' functions are run on a particular MoMetricBundle. + MoMetricBundles must all have the same Slicer (same set of moving + object observations). + out_dir : `str`, opt + Directory to save the metric results. + Default is the current directory. + results_db : `ResultsDb`, opt + A results database to store summary stat information. + If not specified, one will be created in the out_dir. + This database saves information about the metrics calculated, + including their summary statistics. + verbose : `bool`, opt + Flag to turn on/off verbose feedback. + """ def __init__(self, bundle_dict, out_dir=".", results_db=None, verbose=True): self.verbose = verbose self.bundle_dict = bundle_dict @@ -316,7 +423,7 @@ def _check_compatible(self, metric_bundle1, metric_bundle2): return True def _find_compatible(self, test_keys): - """ "Private utility to find which metricBundles with keys in the + """Private utility to find which metricBundles with keys in the list 'test_keys' can be calculated at the same time -- having the same slicer, constraint, maps, and compatible stackers. @@ -325,10 +432,11 @@ def _find_compatible(self, test_keys): ----------- test_keys : `list` List of the dictionary keys (of self.bundle_dict) to - test for compatibilility. + test for compatibility. + Returns -------- - list of lists + compatible_lists : `list` [`lists`] Returns test_keys, split into separate lists of compatible metricBundles. """ @@ -374,9 +482,6 @@ def run_constraint(self, constraint): match this constraint in the metricBundleGroup. Also calculates child metrics and summary statistics, and writes all to disk. - (work is actually done in _runCompatible, so that only completely - compatible sets of metricBundles - run at the same time). 
Parameters ---------- diff --git a/rubin_sim/maf/metrics/mo_metrics.py b/rubin_sim/maf/metrics/mo_metrics.py index 4b56c59b4..c75d50fa6 100644 --- a/rubin_sim/maf/metrics/mo_metrics.py +++ b/rubin_sim/maf/metrics/mo_metrics.py @@ -41,6 +41,61 @@ def _set_vis(sso_obs, snr_limit, snr_col, vis_col): class BaseMoMetric(BaseMetric): """Base class for the moving object metrics. Intended to be used with the Moving Object Slicer. + + Parameters + ---------- + cols : `list` [`str`] or None + List of the column names needed to run the metric. + These columns must be in the moving object data files. + metric_name : `str` or None + Name of the metric. + If None, a name is created based on the class name. + units : `str`, opt + Units for the resulting metric values. + badval : `float`, opt + Flag "bad" value returned if the metric cannot be calculated. + comment : `str` or None, opt + A default comment to use for the DisplayDict (display caption) + if no value is provided elsewhere. + child_metrics : `list` [`~BaseChildMetric`] or None, opt + A list of child metrics to run on the results of (this) metric. + Child metrics take the metric results from this metric and + add some additional processing or pull out a particular value. + The results of the child metric are passed to a new MoMetricBundle. + app_mag_col : `str`, opt + Name of the apparent magnitude column + in the object observations. Typically added by a stacker. + app_mag_v_col : `str`, opt + Name of the apparent magnitude V band column + in the objects observations. + m5_col : `str`, opt + Name of the m5 limiting magnitude column + in the objects observations. + night_col : `str`, opt + Name of the night column in the objects observations. + mjd_col : `str`, opt + Name of the MJD column in the objects observations. + snr_col : `str`, opt + Name of the column describing the SNR of this object in a given + observation, in the objects observations. Added by a stacker. + vis_col : `str`, opt + Name of the column describing the probability of detecting + this object in a given observation. Added by a stacker. + ra_col : `str`, opt + Name of the column describing the RA of this object + in the objects observations. + dec_col : `str`, opt + Name of the column describing the Declination of this object + in the objects observations. + seeing_col : `str`, opt + Name of the column describing the seeing to be used in + evaluations of this object, in the objects observations. + Typically this is the geometric seeing, for evaluating streak length. + exp_time_col : `str`, opt + Name of the exposure time column in the objects observations. + filter_col : `str`, opt + Name of the column describing the filter used for a given observation, + in the objects observations. """ def __init__( @@ -139,8 +194,8 @@ class BaseChildMetric(BaseMoMetric): ---------- parentDiscoveryMetric : `~BaseMoMetric` The 'parent' metric which generated the metric data used - calculate this 'child' metric. - badval : `float`, optional + to calculate this 'child' metric. + badval : `float`, opt Value to return when metric cannot be calculated. """ @@ -203,8 +258,14 @@ def run(self, sso_obs, orb, hval): class NObsNoSinglesMetric(BaseMoMetric): """Count the number of observations for an SSobject, without singles. Don't include observations where it was a single observation on a night. - """ + Parameters + ---------- + snr_limit : `float` or None + If the snr_limit is None, detection of the object in a visit is + determined using the _calcVis method (completeness calculation).
+ If not None, the snr is calculated and used as a flat cutoff instead. + """ def __init__(self, snr_limit=None, **kwargs): super().__init__(**kwargs) self.snr_limit = snr_limit @@ -221,8 +282,15 @@ def run(self, sso_obs, orb, hval): class NNightsMetric(BaseMoMetric): - """Count the number of distinct nights an SSobject is observed.""" + """Count the number of distinct nights an SSobject is observed. + Parameters + ---------- + snr_limit : `float` or None + If the snr_limit is None, detection of the object in a visit is + determined using the _calcVis method (completeness calculation). + If not None, the snr is calculated and used as a flat cutoff instead. + """ def __init__(self, snr_limit=None, **kwargs): super().__init__(**kwargs) self.snr_limit = snr_limit @@ -238,8 +306,14 @@ def run(self, sso_obs, orb, hval): class ObsArcMetric(BaseMoMetric): """Calculate the difference in time between the first and last observation of an SSobject. - """ + Parameters + ---------- + snr_limit : `float` or None + If the snr_limit is None, detection of the object in a visit is + determined using the _calcVis method (completeness calculation). + If not None, the snr is calculated and used as a flat cutoff instead. + """ def __init__(self, snr_limit=None, **kwargs): super().__init__(**kwargs) self.snr_limit = snr_limit @@ -257,26 +331,26 @@ class DiscoveryMetric(BaseMoMetric): Parameters ---------- - n_obs_per_night : `int`, optional + n_obs_per_night : `int`, opt Number of observations required within a single night. Default 2. - t_min : `float`, optional + t_min : `float`, opt Minimum time span between observations in a single night, in days. Default 5 minutes (5/60/24). - t_max : `float`, optional + t_max : `float`, opt Maximum time span between observations in a single night, in days. Default 90 minutes. - n_nights_per_window : `int`, optional + n_nights_per_window : `int`, opt Number of nights required with observations, within the track window. Default 3. - t_window : `int`, optional + t_window : `int`, opt Number of nights included in the track window. Default 15. - snr_limit : None or `float`, optional + snr_limit : None or `float`, opt SNR limit to use for observations. If snr_limit is None, (default), then it uses the completeness calculation added to the 'vis' column (probabilistic visibility, based on 5-sigma limit). If snr_limit is not None, it uses this SNR value as a cutoff. - metricName : `str`, optional + metricName : `str`, opt The metric name to use. Default will be to construct Discovery_nObsPerNightxnNightsPerWindowintWindow. @@ -404,8 +478,15 @@ class DiscoveryNChancesMetric(BaseChildMetric): Returns total number of discovery opportunities. Child metric to be used with the Discovery Metric. - """ + Parameters + ---------- + parentDiscoveryMetric : `~BaseMoMetric` + The 'parent' metric which generated the metric data used + to calculate this 'child' metric. + badval : `float`, opt + Value to return when metric cannot be calculated. + """ def __init__( self, parent_discovery_metric, @@ -434,8 +515,15 @@ def run(self, sso_obs, orb, hval, metric_values): class DiscoveryNObsMetric(BaseChildMetric): """Calculates the number of observations in the first discovery track of an SSobject. - """ + Parameters + ---------- + parentDiscoveryMetric : `~BaseMoMetric` + The 'parent' metric which generated the metric data used + to calculate this 'child' metric. + badval : `float`, opt + Value to return when metric cannot be calculated.
+ """ def __init__(self, parent_discovery_metric, badval=0, **kwargs): super().__init__(parent_discovery_metric, badval=badval, **kwargs) # The number of the discovery chance to use. @@ -451,7 +539,16 @@ def run(self, sso_obs, orb, hval, metric_values): class DiscoveryTimeMetric(BaseChildMetric): - """Returns the time of the first discovery track of an SSobject.""" + """Returns the time of the first discovery track of an SSobject. + + Parameters + ---------- + parentDiscoveryMetric : `~BaseMoMetric` + The 'parent' metric which generated the metric data used + calculate this 'child' metric. + badval : `float`, opt + Value to return when metric cannot be calculated. + """ def __init__(self, parent_discovery_metric, t_start=None, badval=-999, **kwargs): super().__init__(parent_discovery_metric, badval=badval, **kwargs) @@ -475,7 +572,16 @@ def run(self, sso_obs, orb, hval, metric_values): class DiscoveryDistanceMetric(BaseChildMetric): - """Returns the distance of the first discovery track of an SSobject.""" + """Returns the distance of the first discovery track of an SSobject. + + Parameters + ---------- + parentDiscoveryMetric : `~BaseMoMetric` + The 'parent' metric which generated the metric data used + calculate this 'child' metric. + badval : `float`, opt + Value to return when metric cannot be calculated. + """ def __init__(self, parent_discovery_metric, distance_col="geo_dist", badval=-999, **kwargs): super().__init__(parent_discovery_metric, badval=badval, **kwargs) @@ -497,7 +603,16 @@ def run(self, sso_obs, orb, hval, metric_values): class DiscoveryRadecMetric(BaseChildMetric): - """Returns the RA/Dec of the first discovery track of an SSobject.""" + """Returns the RA/Dec of the first discovery track of an SSobject. + + Parameters + ---------- + parentDiscoveryMetric : `~BaseMoMetric` + The 'parent' metric which generated the metric data used + calculate this 'child' metric. + badval : `float`, opt + Value to return when metric cannot be calculated. + """ def __init__(self, parent_discovery_metric, badval=None, **kwargs): super().__init__(parent_discovery_metric, badval=badval, **kwargs) @@ -520,7 +635,16 @@ def run(self, sso_obs, orb, hval, metric_values): class DiscoveryEclonlatMetric(BaseChildMetric): """Returns the ecliptic lon/lat and solar elong of the first discovery - track of an SSobject.""" + track of an SSobject. + + Parameters + ---------- + parentDiscoveryMetric : `~BaseMoMetric` + The 'parent' metric which generated the metric data used + calculate this 'child' metric. + badval : `float`, opt + Value to return when metric cannot be calculated. + """ def __init__(self, parent_discovery_metric, badval=None, **kwargs): super().__init__(parent_discovery_metric, badval=badval, **kwargs) @@ -543,7 +667,16 @@ def run(self, sso_obs, orb, hval, metric_values): class DiscoveryVelocityMetric(BaseChildMetric): - """Returns the sky velocity of the first discovery track of an SSobject.""" + """Returns the sky velocity of the first discovery track of an SSobject. + + Parameters + ---------- + parentDiscoveryMetric : `~BaseMoMetric` + The 'parent' metric which generated the metric data used + calculate this 'child' metric. + badval : `float`, opt + Value to return when metric cannot be calculated. 
+ """ def __init__(self, parent_discovery_metric, badval=-999, **kwargs): super().__init__(parent_discovery_metric, badval=badval, **kwargs) @@ -570,6 +703,24 @@ class ActivityOverTimeMetric(BaseMoMetric): Splits observations into time periods set by 'window', then looks for observations within each window, and reports what fraction of the total windows receive 'nObs' visits. + + Parameters + ---------- + window : `float` + The (repeated) time period to search for activity. + snr_limit : None or `float`, opt + SNR limit to use for observations. + If snr_limit is None, then it uses + the completeness calculation added to the 'vis' column + (probabilistic visibility, based on 5-sigma limit). + If snr_limit is not None, it uses this SNR value as a cutoff. + survey_years : `float`, opt + The length of time of the survey. The test `window` is repeated + over `survey_years`, and then a fraction calculated from the + number of bins in which observations were acquired compared to the + total number of bins. + metric_name : `str` or None, opt + Name for the metric. If None, one is created from the class name. """ def __init__(self, window, snr_limit=5, survey_years=10.0, metric_name=None, **kwargs): @@ -601,6 +752,30 @@ class ActivityOverPeriodMetric(BaseMoMetric): Count the fraction of the orbit (when split into n_bins) that receive observations, in order to have a chance to detect activity. + + Parameters + ---------- + bin_size : `float` + Like `window` for the ActivityOverTimeMetric, + but describes how much of the orbit + (considered in mean motion) should be included in a given bin. + In degrees. + snr_limit : None or `float`, opt + SNR limit to use for observations. + If snr_limit is None, then it uses + the completeness calculation added to the 'vis' column + (probabilistic visibility, based on 5-sigma limit). + If snr_limit is not None, it uses this SNR value as a cutoff. + q_col : `str`, opt + The name of the q column in the objects orbit data. + e_col : `str`, opt + The name of the eccentricity column in the objects orbit data. + t_peri_col : `str`, opt + The name of the time of perihelion column in the objects orbit data. + anomaly_col : `str`, opt + The name of the mean anomaly column in the objects orbit data. + metric_name : `str` or None, opt + Name for the metric. If None, one is created from the class name. """ def __init__( @@ -744,19 +919,18 @@ class HighVelocityNightsMetric(BaseMoMetric): Parameters ---------- - psf_facto r: `float`, optional + psf_factor: `float`, opt Object velocity (deg/day) must be >= 24 * psf_factor * seeingGeom (") / visitExpTime (s). Default is 2 (i.e. object trailed over 2 psf's). - n_obs_per_night : `int`, optional + n_obs_per_night : `int`, opt Number of observations per night required. Default 2. snr_limit : `float` or None If snr_limit is set as a float, then requires object to be above snr_limit SNR in the image. If snr_limit is None, this uses the probabilistic 'visibility' calculated by the vis stacker, which means SNR ~ 5. - Default is None. - velocity_col : `str`, optional + velocity_col : `str`, opt Name of the velocity column in the obs file. Default 'velocity'. (note this is deg/day). @@ -806,8 +980,33 @@ class LightcurveInversionAsteroidMetric(BaseMoMetric): lightcurve inversion. This metric is generally applicable only to inner solar system objects (NEOs, MBAs). - Determine if the cumulative sum of observations of a target are - enough to enable lightcurve inversion for shape modeling. 
+ Parameters + ---------- + weight_det : `float`, opt + The SNR-weighted number of detections required (per bandpass in any + ONE of the filters in filterlist). + Default 50. + snr_limit : `float` or None, opt + If snr_limit is set as a float, then requires object to be + above snr_limit SNR in the image. + If snr_limit is None, this uses the probabilistic 'visibility' + calculated by the vis stacker, + which means SNR ~ 5. Default is None. + snr_max : `float`, opt + Maximum value toward the SNR-weighting to consider. Default 100. + filterlist : `list` [`str`], opt + The filters which the lightcurve inversion could be based on. + Requirements must be met in one of these filters. + + Returns + ------- + metric_value : `int` + 0 (could not perform lightcurve inversion) or 1 (could) + + Notes + ----- + This metric determines if the cumulative sum of observations of a + target is enough to enable lightcurve inversion for shape modeling. For this to be true, multiple conditions need to be satisfied: @@ -828,29 +1027,6 @@ class LightcurveInversionAsteroidMetric(BaseMoMetric): Contributed by Steve Chesley, Wes Fraser, Josef Durech, and the inner solar system working group. - - Parameters - ---------- - weight_det : `float`, optional - The SNR-weighted number of detections required (per bandpass in any - ONE of the filters in filterlist). - Default 50. - snr_limit : `float` or None, optional - If snr_limit is set as a float, then requires object to be - above snr_limit SNR in the image. - If snr_limit is None, this uses the probabilistic 'visibility' - calculated by the vis stacker, - which means SNR ~ 5. Default is None. - snr_max : `float`, optional - Maximum value toward the SNR-weighting to consider. Default 100. - filterlist : `list` [`str`], optional - The filters which the lightcurve inversion could be based on. - Requirements must be met in one of these filters. - - Returns - ------- - metric_value : `int` - 0 (could not perform lightcurve inversion) or 1 (could) """ def __init__( @@ -909,34 +1085,20 @@ class ColorAsteroidMetric(BaseMoMetric): object. This metric is appropriate for MBAs and NEOs, and other inner solar system objects. - The metric evaluates if the SNR-weighted number of observations are - enough to determine an approximate lightcurve and phase function -- - and from this, then a color for the asteroid can be determined. - The assumption is that you must fit the lightcurve/phase function - in each bandpass, and could do this well-enough if you have at least - weight_det SNR-weighted observations in the bandpass. - e.g. to find a g-r color, you must have 10 (SNR-weighted) obs in g - and 10 in r. - - For more details, see - https://docs.google.com/document/d/1GAriM7trpTS08uanjUF7PyKALB2JBTjVT7Y6R30i0-8/edit?usp=sharing - - Contributed by Wes Fraser, Steven Chesley - & the inner solar system working group. Parameters ---------- - weight_det: float, optional + weight_det: float, opt The SNR-weighted number of detections required (per bandpass in any ONE of the filters in filterlist). Default 10. - snr_limit: float or None, optional + snr_limit: float or None, opt If snr_limit is set as a float, then requires object to be above snr_limit SNR in the image. If snr_limit is None, this uses the probabilistic 'visibility' calculated by the vis stacker, which means SNR ~ 5. Default is None. - snr_max: float, optional + snr_max: float, opt Maximum value toward the SNR-weighting to consider. Default 20. Returns ------- @@ -951,6 +1113,23 @@ class ColorAsteroidMetric(BaseMoMetric): i.e.
colors = g-r, r-i, i-z, OR r-i, i-z, z-y.. 3 = All 5 from grizy. i.e. colors g-r, r-i, i-z, z-y. 4 = All 6 filters (ugrizy) -- best possible! add u-g. + + Notes + ----- + The metric evaluates if the SNR-weighted number of observations are + enough to determine an approximate lightcurve and phase function -- + and from this, then a color for the asteroid can be determined. + The assumption is that you must fit the lightcurve/phase function + in each bandpass, and could do this well enough if you have at least + weight_det SNR-weighted observations in the bandpass. + e.g. to find a g-r color, you must have 10 (SNR-weighted) obs in g + and 10 in r. + + For more details, see + https://docs.google.com/document/d/1GAriM7trpTS08uanjUF7PyKALB2JBTjVT7Y6R30i0-8/edit?usp=sharing + + Contributed by Wes Fraser, Steven Chesley + & the inner solar system working group. """ def __init__(self, weight_det=10, snr_max=20, snr_limit=None, **kwargs): @@ -1016,39 +1195,25 @@ class LightcurveColorOuterMetric(BaseMoMetric): """Calculate the likelihood of being able to calculate a color and lightcurve for outer solar system objects. - This metric is appropriate for outer solar system objects, - such as TNOs and SDOs. - - This metric evaluates whether the number of observations is - sufficient to fit a lightcurve in a primary and secondary bandpass. - The primary bandpass requires more observations than the secondary. - Essentially, it's a complete lightcurve in one or both bandpasses, with at - least a semi-complete lightcurve in the secondary band. - - The lightcurve/color can be calculated with any two of the - bandpasses in filterlist. - - Contributed by Wes Fraser. - Parameters ---------- - snr_limit : `float` or None, optional + snr_limit : `float` or None, opt If snr_limit is set as a float, then requires object to be above snr_limit SNR in the image. If snr_limit is None, this uses the probabilistic 'visibility' calculated by the vis stacker, which means SNR ~ 5. Default is None. - num_req : `int`, optional + num_req : `int`, opt Number of observations required for a lightcurve fitting. Default 30. - num_sec_filt : `int`, optional + num_sec_filt : `int`, opt Number of observations required in a secondary band for color only. Default 20. - filterlist : `list` [`str`], optional + filterlist : `list` [`str`], opt Filters that the primary/secondary measurements can be in. Returns ------- - flag : `ont` + flag : `int` A flag that indicates whether a color/lightcurve was generated in: 0 = no lightcurve (although may have had 'color' in one or more band) @@ -1057,6 +1222,22 @@ class LightcurveColorOuterMetric(BaseMoMetric): 2+ = lightcurves in more than one filter (or lightcurve + color) e.g. lightcurve in 2 bands, with additional color information in another = 3. + + Notes + ----- + This metric is appropriate for outer solar system objects, + such as TNOs and SDOs. + + This metric evaluates whether the number of observations is + sufficient to fit a lightcurve in a primary and secondary bandpass. + The primary bandpass requires more observations than the secondary. + Essentially, it's a complete lightcurve in one or both bandpasses, with at + least a semi-complete lightcurve in the secondary band. + + The lightcurve/color can be calculated with any two of the + bandpasses in filterlist. + + Contributed by Wes Fraser. """ def __init__( @@ -1100,27 +1281,23 @@ class InstantaneousColorMetric(BaseMoMetric): """Identify SSobjects which could have observations suitable to determine instantaneous colors.
- Generally, this is not the mode LSST would work in - the lightcurves of the objects mean that the time interval would have to - be quite short. - This is roughly defined as objects which have more than n_pairs pairs of observations with SNR greater than snr_limit, in bands bandOne and bandTwo, within n_hours. Parameters ---------- - n_pairs : `int`, optional + n_pairs : `int`, opt The number of pairs of observations (in each band) that must be within n_hours. Default 1. - snr_limit : `float`, optional + snr_limit : `float`, opt The SNR limit for the observations. Default 10. - n_hours : `float`, optional + n_hours : `float`, opt The time interval between observations in the two bandpasses (hours). Default 0.5 hours. - b_one : `str`, optional + b_one : `str`, opt The first bandpass for the color. Default 'g'. - b_two : `str`, optional + b_two : `str`, opt The second bandpass for the color. Default 'r'. Returns ------- @@ -1183,51 +1360,51 @@ class KnownObjectsMetric(BaseMoMetric): The default values are calibrated using the NEOs larger than 140m discovered in the last 20 years and assuming a 30% completeness in 2017. - Note: the default parameteres here were set up in ~2012, and are likely + Note: the default parameters here were set up in ~2012, and are likely out of date (potentially adding another epoch of discovery). Parameters ----------- - elong_thresh : `float`, optional + elong_thresh : `float`, opt The cutoff in solar elongation to consider an object 'visible'. Default 100 deg. - v_mag_thresh1 : `float`, optional + v_mag_thresh1 : `float`, opt The magnitude threshold for previously known objects. Default 20.0. - eff1 : `float`, optional + eff1 : `float`, opt The likelihood of actually achieving each individual input observation. If the input observations include one observation per day, an 'eff' value of 0.3 would mean that (on average) only one third of these observations would be achieved. This is similar to the level for LSST, which can cover the visible sky every 3-4 days. Default 0.1 - t_switch1 : `float`, optional + t_switch1 : `float`, opt The (MJD) time to switch between v_mag_thresh1 + eff1 to v_mag_thresh2 + eff2, e.g. the end of the first period. Default 53371 (2005). - v_mag_thresh2 : `float`, optional + v_mag_thresh2 : `float`, opt The magnitude threshold for previously known objects. Default 22.0. This is based on assuming PS and other surveys will be efficient down to V=22. - eff2 : `float`, optional + eff2 : `float`, opt The efficiency of observations during the second period of time. Default 0.1 - t_switch2 : `float`, optional + t_switch2 : `float`, opt The (MJD) time to switch between v_mag_thresh2 + eff2 to v_mag_thresh3 + eff3. Default 57023 (2015). - v_mag_thresh3 : `float`, optional + v_mag_thresh3 : `float`, opt The magnitude threshold during the third period. Default 22.0, based on PS1 + Catalina. - eff3 : `float`, optional + eff3 : `float`, opt The efficiency of observations during the third period. Default 0.1 - t_switch3 : `float`, optional + t_switch3 : `float`, opt The (MJD) time to switch between v_mag_thresh3 + eff3 to v_mag_thresh4 + eff4. Default 59580 (2022). - v_mag_thresh4 : `float`, optional + v_mag_thresh4 : `float`, opt The magnitude threshold during the fourth (last) period. Default 22.0, based on PS1 + Catalina. - eff4 : `float`, optional + eff4 : `float`, opt The efficiency of observations during the fourth (last) period.
Default 0.2 """ diff --git a/rubin_sim/maf/metrics/pair_metric.py b/rubin_sim/maf/metrics/pair_metric.py index 032f1180c..0d29c7901 100644 --- a/rubin_sim/maf/metrics/pair_metric.py +++ b/rubin_sim/maf/metrics/pair_metric.py @@ -11,12 +11,24 @@ class PairMetric(BaseMetric): Parameters ---------- - match_min : `float` + mjd_col : `str`, opt + Name of the MJD column in the observations. + metric_name : `str`, opt + Name for the resulting metric. If None, one is constructed from + the class name. + match_min : `float`, opt Minutes after first observation to count something as a match. - match_max : `float` + match_max : `float`, opt Minutes after first observation to count something as a match. - bin_size : `float` + bin_size : `float`, opt bin_size to use (minutes). + Note that bin_size should be considerably smaller than the difference + between match_min and match_max. + + Returns + ------- + num_pairs : `float` + The number of pairs of visits within the min and max time range. """ def __init__( diff --git a/rubin_sim/maf/metrics/periodic_detect_metric.py b/rubin_sim/maf/metrics/periodic_detect_metric.py index 7cb4cbd42..70d1132fe 100644 --- a/rubin_sim/maf/metrics/periodic_detect_metric.py +++ b/rubin_sim/maf/metrics/periodic_detect_metric.py @@ -19,18 +19,24 @@ class PeriodicDetectMetric(BaseMetric): Parameters ---------- - period : `float` or `array` + mjd_col : `str`, opt + Name of the MJD column in the observations. + periods : `float` or `np.ndarray`, (N,), opt The period of the star (days). - Can be a single value, or an array. If an array, amplitude and starMag - should be arrays of equal length. - amplitude : `float` + Can be a single value, or an array. + If an array, amplitude and starMag should be arrays of equal length. + amplitudes : `float`, opt The amplitude of the stellar variability, (mags). - starMag : `float` + m5_col : `str`, opt + The name of the m5 limiting magnitude column in the observations. + metric_name : `str`, opt + The name for the metric. + starMags : `float`, opt The mean magnitude of the star in r (mags). - sig_level : `float` + sig_level : `float`, opt The value to use to compare to the p-value when deciding if we can reject the null hypothesis. - sed_template : `str` + sed_template : `str`, opt The stellar SED template to use to generate realistic colors (default is an F star, so RR Lyrae-like) diff --git a/rubin_sim/maf/metrics/phase_gap_metric.py b/rubin_sim/maf/metrics/phase_gap_metric.py index be8479ab7..cf739d11d 100644 --- a/rubin_sim/maf/metrics/phase_gap_metric.py +++ b/rubin_sim/maf/metrics/phase_gap_metric.py @@ -99,7 +99,27 @@ def reduce_largest_gap(self, metric_val): # To fit a periodic source well, you need to cover the full phase, # and fit the amplitude. class PeriodicQualityMetric(BaseMetric): - """Evaluate phase coverage over a given period.""" + """Evaluate phase coverage over a given period. + + Parameters + ---------- + mjd_col : `str`, opt + Name of the MJD column in the observations. + period : `float`, opt + Period to check. + m5_col : `str`, opt + Name of the m5 column in the observations. + metric_name : `str`, opt + Name of the metric. + star_mag : `float`, opt + Magnitude of the star to simulate coverage for. + + Returns + ------- + value : `float` + Value representing phase_coverage * amplitude_snr. + Ranges from 0 (poor) to 1.
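A small usage sketch for the PairMetric parameters documented earlier in this hunk (the numeric values are illustrative choices, not defaults defined by this patch):

    pair_metric = maf.metrics.PairMetric(
        mjd_col="observationStartMJD",
        match_min=20.0,  # minutes: earliest revisit that counts as a pair
        match_max=40.0,  # minutes: latest revisit that counts as a pair
        bin_size=5.0,    # minutes: much smaller than match_max - match_min
    )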
+ """ def __init__( self, @@ -119,7 +139,9 @@ def __init__( ) def _calc_phase(self, data_slice): - """1 is perfectly balanced phase coverage, 0 is no effective coverage.""" + """1 is perfectly balanced phase coverage, + 0 is no effective coverage. + """ angles = data_slice[self.mjd_col] % self.period angles = angles / self.period * 2.0 * np.pi x = np.cos(angles) diff --git a/rubin_sim/maf/metrics/sn_n_sn_metric.py b/rubin_sim/maf/metrics/sn_n_sn_metric.py index c7edf7244..7b252cee2 100644 --- a/rubin_sim/maf/metrics/sn_n_sn_metric.py +++ b/rubin_sim/maf/metrics/sn_n_sn_metric.py @@ -330,16 +330,18 @@ def season_length(self, seasons, data_slice, zseason): Parameters ----------- seasons : `list` [`int`] - list of seasons to process + list of seasons to process data_slice : `np.ndarray`, (N,)` - array of observations + array of observations + zseason : `pd.DataFrame` + redshift infos per season Returns -------- seasons : `list` [`int`] - list of seasons to process - dur_z : `pandas.DataFrame` - season lengths vs z + list of seasons to process + dur_z : `pd.DataFrame` + season lengths vs z """ # if seasons = -1: process the seasons seen in data if seasons == [-1]: @@ -370,16 +372,16 @@ def get_season_info(self, dfa, zseason, min_duration=60.0): Parameters -------------- - dfa : pandas df - dat to process - zseason : pandas df - redshift infos per season + dfa : `pd.DataFrame` + data to process + zseason : `pd.DataFrame` + redshift infos per season min_duration : `float`, opt - min season length to be accepted (default: 60 days) + min season length to be accepted (default: 60 days) Returns -------- - pandas df with season length infos + season_info : `pd.DataFrame` with season length infos """ @@ -409,13 +411,13 @@ def step_lc(self, obs, gen_par, x1=-2.0, color=0.2): Parameters --------------- obs : array - observations + observations gen_par : array - simulation parameters + simulation parameters x1 : `float`, opt - stretch value (default: -2.0) + stretch value (default: -2.0) color : `float`, opt - color value (default: 0.2) + color value (default: 0.2) Returns ---------- @@ -431,12 +433,12 @@ def step_efficiencies(self, lc): Parameter ------------- - lc: pandas df + lc: `pd.DataFrame` light curves Returns ----------- - pandas df with efficiencies + `pd.DataFrame` with efficiencies """ sn_effis = ( @@ -476,7 +478,7 @@ def step_nsn(self, sn_effis, dur_z): Parameters ---------- - sn_effis : pandas df + sn_effis : `pd.DataFrame` data with efficiencies of observation dur_z : array array of season length @@ -500,13 +502,13 @@ def season_info(self, grp, min_duration): Parameters -------------- - grp : pandas df group + grp : `pd.DataFrame` group min_duration : `float` minimal duration for a season to be considered Returns --------- - pandas df with the following cols: + `pd.DataFrame` with the following cols: - Nvisits: number of visits for this group - N_xx: number of visits in xx where xx is defined in self.bandstat @@ -542,17 +544,17 @@ def duration_z(self, grp, min_duration=60.0): Parameters -------------- - grp : pandas df group + grp : `pd.DataFrame` group data to process: season infos min_duration : `float`, opt min season length for a season to be processed (deafult: 60 days) Returns ---------- - pandas df with season_length, z, T0_min and T0_max cols + `pd.DataFrame` with season_length, z, T0_min and T0_max cols """ - + ## IS THIS CALLED FROM ANYWHERE? 
daymin = grp["MJD_min"].values daymax = grp["MJD_max"].values dur_z = pd.DataFrame(self.zrange, columns=["z"]) @@ -579,7 +581,7 @@ def calc_daymax(self, grp, daymax_step): Parameters -------------- - grp: group (pandas df sense) + grp: group (`pd.DataFrame` sense) group of data to process with the following cols: t0_min: T0 min value (per season) t0_max: T0 max value (per season) @@ -588,7 +590,7 @@ def calc_daymax(self, grp, daymax_step): Returns ---------- - pandas df with daymax, min_rf_phase, max_rf_phase values + `pd.DataFrame` with daymax, min_rf_phase, max_rf_phase values """ @@ -614,9 +616,9 @@ def gen_lc(self, grp, gen_par_orig, x1, color): Parameters --------------- - grp : pandas group + grp : pd group observations to process - gen_par_orig : pandas df + gen_par_orig : `pd.DataFrame` simulation parameters x1 : `float` SN stretch @@ -625,7 +627,7 @@ def gen_lc(self, grp, gen_par_orig, x1, color): Returns ---------- - light curves as pandas df + light curves as `pd.DataFrame` """ season = grp.name @@ -655,12 +657,12 @@ def sn_effi(self, lc): Parameters --------------- - lc : pandas grp + lc : pd grp light curve Returns ---------- - pandas df of sn efficiencies vs z + `pd.DataFrame` of sn efficiencies vs z """ if self.verbose: @@ -773,10 +775,10 @@ def sigma_s_nparams(self, grp): Parameters --------------- - grp: pandas df of flux derivatives wrt SN parameters + grp: `pd.DataFrame` of flux derivatives wrt SN parameters Returns ---------- - Diagonal elements of the inverted matrix (as pandas df) + Diagonal elements of the inverted matrix (as `pd.DataFrame`) """ # params = ['x0', 'x1', 'daymax', 'color'] @@ -817,7 +819,7 @@ def efficiencies(self, dfo): Parameters --------------- - df: pandas df + df: `pd.DataFrame` data to process """ @@ -980,7 +982,7 @@ def z_season(self, seasons, data_slice): if seasons == [-1]: seasons = np.unique(data_slice[self.season_col]) - # pandas df with zmin, zmax, zstep per season + # `pd.DataFrame` with zmin, zmax, zstep per season zseason = pd.DataFrame(seasons, columns=["season"]) zseason["zmin"] = self.zmin zseason["zmax"] = self.zmax @@ -1007,12 +1009,12 @@ def nsn_from_rate(self, grp): Parameters --------------- - grp: pandas df + grp: `pd.DataFrame` data to process Returns ----------- - pandas df with z and nsn_expected as cols + `pd.DataFrame` with z and nsn_expected as cols """ durinterp_z = interp1d( @@ -1044,7 +1046,7 @@ def coadd(self, obs): Parameters ------------ data : `pd.DataFrame` - pandas df of observations + `pd.DataFrame` of observations Returns ------- @@ -1143,12 +1145,12 @@ def nsn_expected_z(self, grp): Parameters -------------- - grp: pandas df group + grp: `pd.DataFrame` group data to process: season infos Returns ---------- - pandas df with season_length, z, nsn_expected cols + `pd.DataFrame` with season_length, z, nsn_expected cols """ @@ -1176,12 +1178,12 @@ def zlim_or_nsn(self, effi, sntype="faint", zlim=-1.0): Parameters --------------- - effi: pandas df - data to process - sntype: str, opt - type of SN to consider for estimation (default: faint) - zlim: float, opt - redshift limit + effi : `pd.DataFrame` + data to process + sntype : `str`, opt + type of SN to consider for estimation (default: faint) + zlim : `float`, opt + redshift limit Returns ----------- @@ -1238,13 +1240,13 @@ def zlim(self, grp, sn_type="faint"): Parameters --------------- - grp: pandas group + grp: pd group sn_type: str, opt type of SN to estimate zlim (default: faint) Returns ------------ - pandas df with the metric as cols + zcomp : `pd.DataFrame` 
with the metric as cols """ zcomp = -1 @@ -1259,13 +1261,14 @@ def nsn(self, grp, sn_type="medium"): Parameters --------------- - grp: pandas group + grp: pd group sn_type: str, opt type of SN to estimate zlim (default: medium) Returns ------------ - pandas df with the metric as cols + nsn : `pd.DataFrame` + DataFrame with the metric as cols """ nsn = -1 @@ -1276,26 +1279,25 @@ def nsn(self, grp, sn_type="medium"): def get_nsn(self, effi, durinterp_z, zmin, zmax, zstep): """ - Method to estimate to total number of SN: NSN = Sum(effi(z)*rate(z)) - - Parameters - --------------- - effi: 1D interpolator - efficiencies vs z - durinterp_z: 1D interpolator - duration vs z - zmin: float - redshift min - zmax: float - redshift max - zstep: float - redshift step + Method to estimate the total number of SN: NSN = Sum(effi(z)*rate(z)) + + Parameters + ----------- + effi : 1D interpolator + efficiencies vs z + durinterp_z : 1D interpolator + duration vs z + zmin : `float` + redshift min + zmax : `float` + redshift max + zstep : `float` + redshift step Returns ---------- - total number of SN up to zmax - - + tot_sn : `int` + total number of SN up to zmax """ zz, rate, err_rate, nsn, err_nsn = self.rate_sn( @@ -1320,14 +1322,15 @@ def check_dur_z(self, dur_z, nmin=2): Parameters ---------------- - dur_z: pandas df + dur_z: `pd.DataFrame` data to process nmin: int, opt minimal number of redshift points per season (default: 2) Returns ----------- - pandas df with seasons having at least nmin points in redshift + dur_z_subset : `pd.DataFrame` + dur_z but only with seasons having at least nmin points in redshift """ diff --git a/rubin_sim/maf/metrics/sn_sl_metric.py b/rubin_sim/maf/metrics/sn_sl_metric.py index 9d6994f36..489fd52e0 100644 --- a/rubin_sim/maf/metrics/sn_sl_metric.py +++ b/rubin_sim/maf/metrics/sn_sl_metric.py @@ -10,56 +10,63 @@ class SNSLMetric(metrics.BaseMetric): - """Calculate the number of expected well-measured strongly lensed SN (per data_slice). - - The number of expected strongly lensed SN detections with a well-measured time delay is given by: - - N (lensed SNe Ia with well measured time delay) = 45.7 * - survey_area / (20000 deg^2) * - cumulative_season_length / (2.5 years) / - (2.15 * exp(0.37 * gap_median_all_filter)) - - where: - survey_area: survey area (in deg2) - cumulative_season_length: cumulative season length (in years) - gap_median_all_filter: median gap (all filters) (in days) - - (reference? metric originated from Simon Huber and Phillipe Gris) + """Calculate the number of expected well-measured strongly lensed SN + (per data_slice). Parameters ---------- - metricName : str, optional + metric_name : `str`, optional metric name Default : SNCadenceMetric - mjd_col : str, optional + mjd_col : `str`, optional mjd column name Default : observationStartMJD, - filter_col : str, optional + filter_col : `str`, optional filter column name Default: filter - night_col : str, optional + night_col : `str`, optional night column name Default : night - m5_col : str, optional + m5_col : `str`, optional individual visit five-sigma limiting magnitude (m5) column name Default : fiveSigmaDepth - season: int (list) or -1, optional - season to process (default: -1: all seasons) - nfilters_min : int, optional + season : `list` [`int`] or None, optional + season to process (default: None: all seasons) + A list with [-1] processes all seasons, as does None. + nfilters_min : `int`, optional The number of filters to demand in a season Default: 4.
- min_season_obs : int, optional + min_season_obs : `int`, optional Minimum number of observations per season. Default 5. - m5mins : dict, optional + m5mins : `dict`, optional Minimum individual image depth for visit to 'count'. - Default None uses {'u': 22.7, 'g': 24.1, 'r': 23.7, 'i': 23.1, 'z': 22.2, 'y': 21.4}. - maps : list, optional - List of maps to use. Default is the dustmap, to reduce m5 limiting mags accordingly. + Default None uses + {'u': 22.7, 'g': 24.1, 'r': 23.7, 'i': 23.1, 'z': 22.2, 'y': 21.4}. + maps : `list`, optional + List of maps to use. + Default is the dustmap, to reduce m5 limiting mags accordingly. Returns ------- - float + n_slsn : `float` Number of expected well-measured strongly lensed SN + + Notes + ----- + The number of expected strongly lensed SN detections with a + well-measured time delay is given by: + + N (lensed SNe Ia with well measured time delay) = 45.7 * + survey_area / (20000 deg^2) * + cumulative_season_length / (2.5 years) / + (2.15 * exp(0.37 * gap_median_all_filter)) + + where: + survey_area: survey area (in deg2) + cumulative_season_length: cumulative season length (in years) + gap_median_all_filter: median gap (all filters) (in days) + + (reference? metric originated from Simon Huber and Phillipe Gris) """ def __init__( @@ -69,7 +76,7 @@ def __init__( filter_col="filter", night_col="night", m5_col="fiveSigmaDepth", - season=[-1], + season=None, nfilters_min=4, min_season_obs=5, m5mins=None, @@ -86,7 +93,10 @@ def __init__( super().__init__(col=cols, metric_name=metric_name, maps=self.maps, units="N SL", **kwargs) self.bad_val = 0 - self.season = season + if season is None: + self.season = [-1] + else: + self.season = season self.bands = "ugrizy" if m5mins is None: self.m5mins = { @@ -105,19 +115,20 @@ def __init__( self.phot_properties = DustValues() def n_lensed(self, area, cadence, season_length): - """ + """Estimate the number of lensed supernovae. 
+ Parameters ----------- - area : float + area : `float` Area in square degrees related to this data_slice (sq deg) - gap_median : float + gap_median : `float` median gap between nights with visits (days) - any filter - cumul_season : float + cumul_season : `float` length of the season or period of consideration (years) Returns ------- - float + n_lensed_s_ne__ia : `float` Number of strongly lensed SN expected in this area """ # estimate the number of lensed supernovae @@ -142,7 +153,8 @@ def run(self, data_slice, slice_point=None): if len(data_slice) == 0: return self.bad_val - # Crop it down so things are coadded per night per filter at the median MJD time + # Crop it down so things are coadded per night per + # filter at the median MJD time night_slice = collapse_night( data_slice, night_col=self.night_col, @@ -150,12 +162,14 @@ def run(self, data_slice, slice_point=None): m5_col=self.m5_col, mjd_col=self.mjd_col, ) - # Calculate the dust extinction-corrected m5 values and cut visits which don't meet self.m5mins + # Calculate the dust extinction-corrected m5 values + # and cut visits which don't meet self.m5mins for f in np.unique(night_slice[self.filter_col]): in_filt = np.where(night_slice[self.filter_col] == f)[0] a_x = self.phot_properties.ax1[f] * slice_point["ebv"] night_slice[self.m5_col][in_filt] = night_slice[self.m5_col][in_filt] - a_x - # Set the visits which fall below the minimum to an obvious non-valid value + # Set the visits which fall below the minimum + # to an obvious non-valid value night_slice[self.m5_col][in_filt] = np.where( night_slice[self.m5_col][in_filt] > self.m5mins[f], night_slice[self.m5_col][in_filt], @@ -166,13 +180,15 @@ def run(self, data_slice, slice_point=None): if len(idxs[0]) == 0: return self.badval - # Reset, with coadded per-night/per-filter values, skipping any too-shallow visits. + # Reset, with coadded per-night/per-filter values, + # skipping any too-shallow visits. 
night_slice = np.sort(night_slice[idxs], order=self.mjd_col) # get the pixel area area = hp.nside2pixarea(slice_point["nside"], degrees=True) - # Note that 'seasons' is the same length as night_slice, and contains integer (season) + float (day) + # Note that 'seasons' is the same length as night_slice, + # and contains integer (season) + float (day) seasons = calc_season(np.degrees(slice_point["ra"]), night_slice[self.mjd_col]) season_ints = np.floor(seasons) diff --git a/rubin_sim/maf/metrics/sn_snr_metric.py b/rubin_sim/maf/metrics/sn_snr_metric.py index 3b71a6f8c..aa656910a 100644 --- a/rubin_sim/maf/metrics/sn_snr_metric.py +++ b/rubin_sim/maf/metrics/sn_snr_metric.py @@ -246,7 +246,6 @@ def snr_slice(self, data_slice, j=-1, output_q=None): # tile m5, MJDs, and seasons to estimate all fluxes and SNR at once m5_vals = np.tile(data_slice[self.m5_col], (len(time_for_lc), 1)) - season_vals = np.tile(data_slice[self.season_col], (len(time_for_lc), 1)) # estimate fluxes and snr in SNR function fluxes_tot, snr = self.snr(time_for_lc, m5_vals, flag, t0_lc) @@ -613,14 +612,14 @@ def plot_history(self, fluxes, mjd, flag, snr, t0_lc, dates): tot_label = [] tot_label_snr = [] - labs = [l.get_label() for l in tot_label] + labs = [ll.get_label() for ll in tot_label] ax[0].legend(tot_label, labs, ncol=1, loc="best", prop={"size": fontsize}, frameon=False) ax[0].set_ylabel("Flux [e.sec$^{-1}$]", fontsize=fontsize) ax[1].set_xlabel("MJD", fontsize=fontsize) ax[1].set_ylabel("SNR", fontsize=fontsize) ax[1].legend() - labs = [l.get_label() for l in tot_label_snr] + labs = [ll.get_label() for ll in tot_label_snr] ax[1].legend( tot_label_snr, labs, diff --git a/rubin_sim/maf/metrics/string_count_metric.py b/rubin_sim/maf/metrics/string_count_metric.py index 741dfcbce..d606bf1da 100644 --- a/rubin_sim/maf/metrics/string_count_metric.py +++ b/rubin_sim/maf/metrics/string_count_metric.py @@ -21,27 +21,29 @@ def __call__(self, indict): class StringCountMetric(BaseMetric): """Count up the number of times each string appears in a column. - Dynamically builds reduce functions for each unique string value, so summary sats can be - named the same as strings in the simData array without knowing the values of those trings ahead of time. + Dynamically builds reduce functions for each unique string value, + so summary stats can be named the same as strings in the + simData array without knowing the values of those strings ahead of time. + + + Parameters + ---------- + metric_name : `str`, opt + Name of the metric. + col : `str`, opt + Column name that has strings to look at. + percent : `bool`, opt + Normalize and return results as percents rather than raw count. 
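The per-slice computation StringCountMetric describes above is essentially a Counter over the column values; for instance (a sketch assuming `data_slice` is the usual structured array of visits):

    from collections import Counter

    counts = Counter(data_slice["filter"])
    # e.g. Counter({'r': 230, 'i': 210, ...}); with percent=True the metric
    # normalizes each count to a percentage of the total.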
""" def __init__(self, metric_name="stringCountMetric", col="filter", percent=False, **kwargs): - """ - Parameters - ---------- - - col: str ('filter') - Column name that has strings to look at - percent : bool (False) - Normalize and return results as percents ranther than raw count - """ if percent: units = "percent" else: units = "count" self.percent = percent cols = [col] - super(StringCountMetric, self).__init__(cols, metric_name, units=units, metric_dtype=object, **kwargs) + super().__init__(cols, metric_name, units=units, metric_dtype=object, **kwargs) self.col = col def run(self, data_slice, slice_point=None): @@ -58,7 +60,8 @@ def run(self, data_slice, slice_point=None): metric_value[key] = counter[key] if self.percent: norm = sum(metric_value[0]) / 100.0 - # Not sure I really like having to loop here, but the dtype is inflexible + # Not sure I really like having to loop here, + # but the dtype is inflexible for key in metric_value.dtype.names: metric_value[key] = metric_value[key] / norm diff --git a/rubin_sim/moving_objects/base_obs.py b/rubin_sim/moving_objects/base_obs.py index b04ad98e2..0e2ae6345 100644 --- a/rubin_sim/moving_objects/base_obs.py +++ b/rubin_sim/moving_objects/base_obs.py @@ -71,7 +71,7 @@ class BaseObs: Name of the Rotator column in the obsData. Default 'rotSkyPos'. obs_degrees: `bool`, optional Whether the observational data is in degrees or radians. - Default True (degrees). + Default True (degrees). outfile_name : `str`, optional The output file name. Default is 'lsst_obs.dat'. diff --git a/rubin_sim/moving_objects/cheby_fits.py b/rubin_sim/moving_objects/cheby_fits.py index 7e4d9d314..a056d31e3 100644 --- a/rubin_sim/moving_objects/cheby_fits.py +++ b/rubin_sim/moving_objects/cheby_fits.py @@ -24,29 +24,6 @@ class ChebyFits: positions with a constrained Chebyshev Polynomial, using the routines in chebyshevUtils.py. - Many chebyshev polynomials are used to fit one moving object over - a given timeperiod; typically, the length of each segment is typically - about 2 days for MBAs. The start and end of each segment must match - exactly, and the entire segments must fit into the total timespan an - integer number of times. This is accomplished by setting n_decimal to - the number of decimal places desired in the 'time' value. - For faster moving objects, this number needs be greater to allow for - smaller subdivisions. - It's tempting to allow flexibility to the point of not - enforcing this non-overlap; however, then the resulting ephemeris - may have multiple values depending on which polynomial segment was - used to calculate the ephemeris. - - The length of each chebyshev polynomial is related to the number of - ephemeris positions used to fit that polynomial by ngran: - length = timestep * ngran - The length of each polynomial is adjusted so that the residuals in - RA/Dec position are less than sky_tolerance - default = 2.5mas. - The polynomial length (and the resulting residuals) is affected - by ngran (i.e. timestep). - - Default values are based on Yusra AlSayaad's work. - Parameters ---------- orbits_obj : `rubin_sim.moving_objects.Orbits` @@ -91,6 +68,31 @@ class ChebyFits: n_decimal places. Default 10. For LSST SIMS moving object database, this should be 13 decimal places for NEOs and 0 for all others. + + Notes + ----- + Many chebyshev polynomials are used to fit one moving object over + a given timeperiod; typically, the length of each segment is typically + about 2 days for MBAs. 
The start and end of each segment must match + exactly, and the entire segments must fit into the total timespan an + integer number of times. This is accomplished by setting n_decimal to + the number of decimal places desired in the 'time' value. + For faster moving objects, this number needs to be greater to allow for + smaller subdivisions. + It's tempting to allow flexibility to the point of not + enforcing this non-overlap; however, then the resulting ephemeris + may have multiple values depending on which polynomial segment was + used to calculate the ephemeris. + + The length of each chebyshev polynomial is related to the number of + ephemeris positions used to fit that polynomial by ngran: + length = timestep * ngran + The length of each polynomial is adjusted so that the residuals in + RA/Dec position are less than sky_tolerance - default = 2.5mas. + The polynomial length (and the resulting residuals) is affected + by ngran (i.e. timestep). + + Default values are based on Yusra AlSayaad's work. """ def __init__( @@ -255,7 +257,7 @@ def _round_length(self, length): Returns ------- - `float` + length : `float` The rounded length value. """ length = round(length, self.n_decimal) diff --git a/rubin_sim/moving_objects/ooephemerides.py b/rubin_sim/moving_objects/ooephemerides.py index 96e031785..177471ec3 100644 --- a/rubin_sim/moving_objects/ooephemerides.py +++ b/rubin_sim/moving_objects/ooephemerides.py @@ -37,12 +37,6 @@ class PyOrbEphemerides: """Generate ephemerides and propagate orbits, using the python interface to Oorb. - Typical usage: - - >>> pyephs = PyOrbEphemerides() - >>> pyephs.setOrbits(orbits) - >>> ephs = pyephs.generateEphemerides(times, timeScale, obscode) - PyOrbEphemerides handles the packing and unpacking of the fortran style arrays that pyoorb uses, to and from more user-friendly pandas arrays. @@ -51,6 +45,14 @@ class PyOrbEphemerides: ephfile : `str`, optional Planetary ephemerides file for Oorb (i.e. de430 or de405). Default $OORB_DATA/de430.dat ($OORB_DATA = $OORB_DIR/data). + + Examples + -------- + Typical usage: + + >>> pyephs = PyOrbEphemerides() + >>> pyephs.setOrbits(orbits) + >>> ephs = pyephs.generateEphemerides(times, timeScale, obscode) """ def __init__(self, ephfile=None): @@ -231,7 +233,7 @@ def convert_orbit_format(self, orb_format="CAR"): Parameters ---------- - format : `str`, optional + orb_format : `str`, optional Format to convert orbital elements into. """ oorb_elem, err = oo.pyoorb.oorb_element_transformation( @@ -277,11 +279,14 @@ def _generate_oorb_ephs_full(self, eph_times, obscode="I11", eph_mode="N"): Parameters ---------- - ephtimes : `np.ndarray` + eph_times : `np.ndarray` Ephemeris times in oorb format (see self.convertTimes) obscode : `int` or `str`, optional The observatory code for ephemeris generation. Default=I11 (Cerro Pachon). + eph_mode : `str`, optional + What dynamical mode to use for generating ephemerides - + "N" (n-body) or "2" (2-body). Returns ------- @@ -301,6 +306,34 @@ def _generate_oorb_ephs_full(self, eph_times, obscode="I11", eph_mode="N"): def _convert_oorb_ephs_full(self, oorb_ephs, by_object=True): """Converts oorb ephemeris array to np.ndarray. + Here we convert to a numpy.ndarray, grouped either by object (default) + or by time (if by_object=False). + The resulting array is composed of columns (of each ephemeris element), + where each column is a 2-d array with first axes either 'object' + or 'time'. + - if by_object = True : [ephemeris elements][object][time] + (i.e.
the 'ra' column = 2-d array, where the [0] axis (length) + equals the number of ephTimes) + - if by_object = False : [ephemeris elements][time][object] + (i.e. the 'ra' column = 2-d arrays, where the [0] axis (length) + equals the number of objects) + + Parameters + ---------- + oorb_ephs : `np.ndarray` + The oorb-formatted ephemeris values + by_object : `bool`, optional + If True (default), resulting converted ephemerides are grouped + by object. + If False, resulting converted ephemerides are grouped by time. + + Returns + ------- + ephemerides : `np.ndarray` + The re-arranged ephemeris values, in a 3-d array. + + Notes + ----- The oorb ephemeris array is a 3-d array organized as: (object / times / eph@time) [objid][time][ephemeris information @ that time] with elements @@ -338,32 +371,6 @@ def _convert_oorb_ephs_full(self, oorb_ephs, by_object=True): ! (32) helio ecliptic cartesian coordinates for the observatory (au) ! (33) helio ecliptic cartesian coordinates for the observatory (au) ! (34) true anomaly (currently only a dummy value) - - Here we convert to a numpy.ndarray, grouped either by object (default) - or by time (if by_object=False). - The resulting array is composed of columns (of each ephemeris element), - where each column is 2-d array with first axes either 'object' - or 'time'. - - if by_object = True : [ephemeris elements][object][time] - (i.e. the 'ra' column = 2-d array, where the [0] axis (length) - equals the number of ephTimes) - - if by_object = False : [ephemeris elements][time][object] - (i.e. the 'ra' column = 2-d arrays, where the [0] axis (length) - equals the number of objects) - - Parameters - ---------- - oorb_ephs : `np.ndarray` - The oorb-formatted ephemeris values - by_object : `bool`, optional - If True (default), resulting converted ephemerides are grouped - by object. - If False, resulting converted ephemerides are grouped by time. - - Returns - ------- - ephemerides : `np.ndarray` - The re-arranged ephemeris values, in a 3-d array. """ ephs = np.swapaxes(oorb_ephs, 2, 0) velocity = np.sqrt(ephs[3] ** 2 + ephs[4] ** 2) @@ -445,6 +452,22 @@ def _convert_oorb_ephs_basic(self, oorb_ephs, by_object=True): """Converts oorb ephemeris array to numpy recarray, with labeled columns. + Parameters + ---------- + oorb_ephs : `np.ndarray` + The oorb-formatted ephemeris values + by_object : `bool`, optional + If True (default), resulting converted ephemerides are grouped + by object. + If False, resulting converted ephemerides are grouped by time. + + Returns + ------- + ephs : `np.ndarray` + The re-arranged ephemeris values, in a 3-d array. + + Notes + ----- The oorb ephemeris array is a 3-d array organized as: (object / times / eph@time) [objid][time][ephemeris information @ that time] with ephemeris @@ -472,20 +495,6 @@ def _convert_oorb_ephs_basic(self, oorb_ephs, by_object=True): - if by_object = False : [ephemeris elements][time][object] (i.e. the 'ra' column = 2-d arrays, where the [0] axis (length) equals the number of objects) - - Parameters - ---------- - oorb_ephs : `np.ndarray` - The oorb-formatted ephemeris values - by_object : `bool`, optional - If True (default), resulting converted ephemerides are grouped - by object. - If False, resulting converted ephemerides are grouped by time. - - Returns - ------- - ephs : `np.ndarray` - The re-arranged ephemeris values, in a 3-d array. 
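The axis convention described above can be verified with a toy array; this sketch mirrors the np.swapaxes call used in the conversion code itself:

    import numpy as np

    oorb_like = np.zeros((3, 5, 11))        # [object][time][element]
    by_elem = np.swapaxes(oorb_like, 2, 0)  # -> [element][time][object]
    assert by_elem.shape == (11, 5, 3)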
""" ephs = np.swapaxes(oorb_ephs, 2, 0) velocity = np.sqrt(ephs[3] ** 2 + ephs[4] ** 2) @@ -525,25 +534,12 @@ def generate_ephemerides( ): """Calculate ephemerides for all orbits at times `times`. - The returned ephemerides are a numpy array that can be grouped - by object or by time. - - If they are grouped by object (by_object = True), the array - is organized as `ephemeris_values[object][time]`. - Here the "ra" column is a 2-d array where the [0] axis - length equals the number of ephemeris times. - - If they are grouped by time (by_object=False), the array - is organized as `ephemeris_values[time][object]`. - Here the "ra" column is a 2-d array where the [0] axis length - equals the number of objects. - All returned positions and angles are in degrees, velocities are degrees/day and distances are in AU. Parameters ---------- - ephtimes : `np.ndarray`, (N,) + times : `np.ndarray`, (N,) Ephemeris times. time_scale : `str`, optional Time scale (UTC, TT, TAI) of times. @@ -565,6 +561,22 @@ def generate_ephemerides( ------- ephemerides : `np.ndarray` The ephemeris values, organized as chosen by the user. + + + Notes + ----- + The returned ephemerides are a numpy array that can be grouped + by object or by time. + + If they are grouped by object (by_object = True), the array + is organized as `ephemeris_values[object][time]`. + Here the "ra" column is a 2-d array where the [0] axis + length equals the number of ephemeris times. + + If they are grouped by time (by_object=False), the array + is organized as `ephemeris_values[time][object]`. + Here the "ra" column is a 2-d array where the [0] axis length + equals the number of objects. """ if eph_mode.lower() in ("nbody", "n"): eph_mode = "N" diff --git a/rubin_sim/phot_utils/photometric_parameters.py b/rubin_sim/phot_utils/photometric_parameters.py index 0f6c5eb30..7b81a291d 100644 --- a/rubin_sim/phot_utils/photometric_parameters.py +++ b/rubin_sim/phot_utils/photometric_parameters.py @@ -185,9 +185,13 @@ def __init__( None will default to value from DefaultPhotometricParameters. bandpass : `str` The name of the bandpass for these parameters. - If set to an LSST bandpass, the constructor will initialize - PhotometricParameters to LSST default values for that bandpass, - excepting any parameters that have been set by hand. e.g. + + Examples + -------- + If `bandpass` is set to an LSST bandpass, + the constructor will initialize + PhotometricParameters to LSST default values for that bandpass, + excepting any parameters that have been set by hand. e.g. >>> myPhotParams = PhotometricParameters(nexp=3, bandpass='u') diff --git a/rubin_sim/selfcal/generate_catalog.py b/rubin_sim/selfcal/generate_catalog.py index 625499061..31cb71032 100644 --- a/rubin_sim/selfcal/generate_catalog.py +++ b/rubin_sim/selfcal/generate_catalog.py @@ -36,8 +36,15 @@ def treexyz(ra, dec): def build_tree(ra, dec, leafsize=100): """Build KD tree on RA/dec and set radius (via setRad) for matching. - ra, dec = RA and Dec values (in radians). - leafsize = the number of Ra/Dec pointings in each leaf node. + Parameters + ---------- + ra : `nd.ndarray`, (N,) + RA values of the tree (in radians) + dec : `nd.ndarray`, (N,) + Dec values of the tree (in radians). + leafsize : `float`, opt + The number of RA/Dec pointings in each leafnode. + Default 100. 
""" if np.any(np.abs(ra) > np.pi * 2.0) or np.any(np.abs(dec) > np.pi * 2.0): raise ValueError("Expecting RA and Dec values to be in radians.") @@ -66,22 +73,22 @@ def generate_catalog( Parameters ---------- - visits : `np.array` + visits : `np.array`, (N,) A numpy array with the properties of the visits. Expected columns of fiveSigmaDepth, ra, dec, rotSkyPos (all degrees) offsets : `list` of rubin_sim.selfcal.Offset classes A list of instatiated classes that will apply offsets to the stars - lsst_filter : `str` ("r") + lsst_filter : `str` Which filter to use for the observed stars. - n_patches : `int` (16) + n_patches : `int` Number of patches to divide the FoV into. Must be an integer squared - radius_fov : `float` (1.8) + radius_fov : `float` Radius of the telescope field of view in degrees - seed : `float` (42) + seed : `float` Random number seed - uncert_floor : `float` (0.005) + uncert_floor : `float` Value to add in quadrature to magnitude uncertainties (mags) - verbose : `bool` (True) + verbose : `bool` Should we be verbose """ diff --git a/rubin_sim/selfcal/solver.py b/rubin_sim/selfcal/solver.py index 7cb21a42f..5d770c91e 100644 --- a/rubin_sim/selfcal/solver.py +++ b/rubin_sim/selfcal/solver.py @@ -16,18 +16,14 @@ class LsqrSolver: ---------- observations : `np.array` A numpy array of the observations. - Should have columns id, patch_id, observed_mag, mag_uncert - patch_out : `str` ("solved_patches.npz") - Output file for patch solutions, can be set to None - star_out : `str` ("solved_stars.npz") - Output file for star solutions, can be set to None - atol : `float` (1e-8) - Tolerance passed to lsqr - btol : `float` (1e-8) - Tolerance passed to lsqr - iter_lim : `int` (None) - Iteration limit passed to lsqr - show : `bool` (False) + Should have columns id, patch_id, observed_mag, mag_uncert. + atol : `float` + Tolerance passed to lsqr. + btol : `float` + Tolerance passed to lsqr. + iter_lim : `int` + Iteration limit passed to lsqr. + show : `bool` Should the lsqr solver print some iteration logs (False). """ From 645bd508cd714ba4a8561b6994a044f74bc9e5a6 Mon Sep 17 00:00:00 2001 From: Lynne Jones Date: Sun, 3 Mar 2024 23:35:29 -0800 Subject: [PATCH 24/26] Fix workflow black check --- .github/workflows/test_and_build.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test_and_build.yaml b/.github/workflows/test_and_build.yaml index 391e47bce..bd9e0c433 100644 --- a/.github/workflows/test_and_build.yaml +++ b/.github/workflows/test_and_build.yaml @@ -72,7 +72,7 @@ jobs: - name: run black shell: bash -l {0} - run: black --check + run: black --check . 
From d339cc21e2081d2f5835e2383f16387b9b266f93 Mon Sep 17 00:00:00 2001
From: Lynne Jones
Date: Sun, 3 Mar 2024 23:36:56 -0800
Subject: [PATCH 25/26] isort

---
 .../lss_obs_strategy/galaxy_counts_metric_extended.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/rubin_sim/maf/maf_contrib/lss_obs_strategy/galaxy_counts_metric_extended.py b/rubin_sim/maf/maf_contrib/lss_obs_strategy/galaxy_counts_metric_extended.py
index 2dc5d08bd..0ea39ac18 100644
--- a/rubin_sim/maf/maf_contrib/lss_obs_strategy/galaxy_counts_metric_extended.py
+++ b/rubin_sim/maf/maf_contrib/lss_obs_strategy/galaxy_counts_metric_extended.py
@@ -20,6 +20,7 @@
 __all__ = ("GalaxyCountsMetricExtended",)

 import warnings
+
 import numpy as np
 import scipy

From b263fb51735a5c5d7f1fbafa494b1a8276cdd943 Mon Sep 17 00:00:00 2001
From: Lynne Jones
Date: Mon, 4 Mar 2024 00:44:03 -0800
Subject: [PATCH 26/26] Black

---
 .../galaxy_counts_metric_extended.py          | 22 ++++++++++---------
 rubin_sim/maf/metric_bundles/metric_bundle.py |  2 +-
 .../maf/metric_bundles/mo_metric_bundle.py    |  2 ++
 rubin_sim/maf/metrics/mo_metrics.py           |  5 +++++
 4 files changed, 20 insertions(+), 11 deletions(-)

diff --git a/rubin_sim/maf/maf_contrib/lss_obs_strategy/galaxy_counts_metric_extended.py b/rubin_sim/maf/maf_contrib/lss_obs_strategy/galaxy_counts_metric_extended.py
index 0ea39ac18..894ec1370 100644
--- a/rubin_sim/maf/maf_contrib/lss_obs_strategy/galaxy_counts_metric_extended.py
+++ b/rubin_sim/maf/maf_contrib/lss_obs_strategy/galaxy_counts_metric_extended.py
@@ -127,21 +127,23 @@ def _gal_count(self, apparent_mag, coaddm5):
         # calculate the change in the power law constant based on the band
         # colors assumed here: (u-g)=(g-r)=(r-i)=(i-z)= (z-y)=0.4
         factor = 0.4
-        band_correction_dict = {'u': -3.0 * factor,
-                                'g': -2.0 * factor,
-                                'r': -1.0 * factor,
-                                'i': 0.0,
-                                'z': factor,
-                                'y': 2.0 * factor}
+        band_correction_dict = {
+            "u": -3.0 * factor,
+            "g": -2.0 * factor,
+            "r": -1.0 * factor,
+            "i": 0.0,
+            "z": factor,
+            "y": 2.0 * factor,
+        }
         if self.filter_band not in band_correction_dict:
-            warnings.warn("Invalid band in GalaxyCountsMetricExtended. "
-                          "Assuming i-band instead.")
+            warnings.warn("Invalid band in GalaxyCountsMetricExtended. " "Assuming i-band instead.")
         band_correction = band_correction_dict.get(self.filter_band, 0.0)

         # check to make sure that the z-bin assigned is valid.
         if (self.redshift_bin != "all") and (self.redshift_bin not in list(self.power_law_const_a.keys())):
-            warnings.warn("Invalid redshift bin in GalaxyCountsMetricExtended. "
-                          "Defaulting to all redshifts.")
+            warnings.warn(
+                "Invalid redshift bin in GalaxyCountsMetricExtended. " "Defaulting to all redshifts."
+            )
             self.redshift_bin = "all"

         # consider the power laws
diff --git a/rubin_sim/maf/metric_bundles/metric_bundle.py b/rubin_sim/maf/metric_bundles/metric_bundle.py
index f97f55621..860e0a2ea 100644
--- a/rubin_sim/maf/metric_bundles/metric_bundle.py
+++ b/rubin_sim/maf/metric_bundles/metric_bundle.py
@@ -88,7 +88,7 @@ class MetricBundle:
     * constraint (an optional definition of a large subset of data)

     Together these define a unique combination of an opsim benchmark,
-    or "metric bundle". 
+    or "metric bundle".
     An example would be: a CountMetric, a HealpixSlicer,
     and a constraint of 'filter="r"'.
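For reference, the CountMetric/HealpixSlicer/constraint example named in that
docstring corresponds to roughly this usage. The import paths follow
rubin_sim.maf's layout at the time of this series, so treat the exact names
as an assumption rather than a guarantee:

    import rubin_sim.maf as maf

    metric = maf.metrics.CountMetric(col="observationStartMJD")
    slicer = maf.slicers.HealpixSlicer(nside=64)
    constraint = 'filter = "r"'
    bundle = maf.metric_bundles.MetricBundle(metric, slicer, constraint)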
diff --git a/rubin_sim/maf/metric_bundles/mo_metric_bundle.py b/rubin_sim/maf/metric_bundles/mo_metric_bundle.py
index 59a993cb9..55a180298 100644
--- a/rubin_sim/maf/metric_bundles/mo_metric_bundle.py
+++ b/rubin_sim/maf/metric_bundles/mo_metric_bundle.py
@@ -172,6 +172,7 @@ class MoMetricBundle(MetricBundle):
     opsim run name, and relevant stackers and maps to apply
     when calculating the metric values.
     """
+
     def __init__(
         self,
         metric,
@@ -373,6 +374,7 @@ class MoMetricBundleGroup:
     verbose : `bool`, opt
         Flag to turn on/off verbose feedback.
     """
+
     def __init__(self, bundle_dict, out_dir=".", results_db=None, verbose=True):
         self.verbose = verbose
         self.bundle_dict = bundle_dict
diff --git a/rubin_sim/maf/metrics/mo_metrics.py b/rubin_sim/maf/metrics/mo_metrics.py
index c75d50fa6..4d10adcd1 100644
--- a/rubin_sim/maf/metrics/mo_metrics.py
+++ b/rubin_sim/maf/metrics/mo_metrics.py
@@ -266,6 +266,7 @@ class NObsNoSinglesMetric(BaseMoMetric):
         determined using the _calcVis method (completeness calculation).
         If not None, the snr is calculated and used as a flat cutoff instead.
     """
+
     def __init__(self, snr_limit=None, **kwargs):
         super().__init__(**kwargs)
         self.snr_limit = snr_limit
@@ -291,6 +292,7 @@ class NNightsMetric(BaseMoMetric):
         determined using the _calcVis method (completeness calculation).
         If not None, the snr is calculated and used as a flat cutoff instead.
     """
+
     def __init__(self, snr_limit=None, **kwargs):
         super().__init__(**kwargs)
         self.snr_limit = snr_limit
@@ -314,6 +316,7 @@ class ObsArcMetric(BaseMoMetric):
         determined using the _calcVis method (completeness calculation).
         If not None, the snr is calculated and used as a flat cutoff instead.
     """
+
     def __init__(self, snr_limit=None, **kwargs):
         super().__init__(**kwargs)
         self.snr_limit = snr_limit
@@ -487,6 +490,7 @@ class DiscoveryNChancesMetric(BaseChildMetric):
     badval : `float`, opt
         Value to return when metric cannot be calculated.
     """
+
     def __init__(
         self,
         parent_discovery_metric,
@@ -524,6 +528,7 @@ class DiscoveryNObsMetric(BaseChildMetric):
     badval : `float`, opt
         Value to return when metric cannot be calculated.
     """
+
     def __init__(self, parent_discovery_metric, badval=0, **kwargs):
         super().__init__(parent_discovery_metric, badval=badval, **kwargs)
         # The number of the discovery chance to use.
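A closing note on the snr_limit convention repeated through mo_metrics.py:
with snr_limit set, visibility is a flat SNR cut; otherwise the metrics fall
back to a completeness-style calculation (_calcVis in the real code). A
schematic stand-in, not the library's implementation:

    import numpy as np

    def visible(snr, snr_limit=None, seed=42):
        """Flat cutoff if snr_limit is given; else a probabilistic stand-in."""
        snr = np.asarray(snr)
        if snr_limit is not None:
            return snr >= snr_limit
        # Placeholder for the completeness-based visibility calculation.
        rng = np.random.default_rng(seed)
        return rng.random(snr.shape) < 1.0 / (1.0 + np.exp(-(snr - 5.0)))

    print(visible([2.0, 4.9, 5.1, 12.0], snr_limit=5.0))
    # [False False  True  True]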