diff --git a/.gitattributes b/.gitattributes index e171614..96d241f 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1,6 +1,4 @@ -liger_iris_pipeline/tests/data/2024A-P123-044_IRIS_IMG1_SKY-SIM-Y_LVL1_0001-00.fits filter=lfs diff=lfs merge=lfs -text -liger_iris_pipeline/tests/data/2024A-P123-044_Liger_IMG1_SCI-J1458+1013-SIM-Y_LVL1_0001.fits filter=lfs diff=lfs merge=lfs -text -liger_iris_pipeline/tests/data/2024A-P123-044_Liger_IMG_SCI-J1458+1013-SIM-Y_LVL1_0001-00.fits filter=lfs diff=lfs merge=lfs -text -liger_iris_pipeline/tests/data/2024A-P123-044_Liger_IMG_SCI-J1458+1013-SIM-Y_LVL1_0001.fits filter=lfs diff=lfs merge=lfs -text -liger_iris_pipeline/tests/data/2024A-P123-044_IRIS_IMG1_SCI-J1458+1013-SIM-Y_LVL0_0001-00.fits filter=lfs diff=lfs merge=lfs -text -liger_iris_pipeline/tests/data/2024A-P123-044_IRIS_IMG1_SCI-J1458+1013-SIM-Y_LVL1_0001-00.fits filter=lfs diff=lfs merge=lfs -text +liger_iris_pipeline/tests/data/2024B-P123-008_IRIS_IMG1_SKY-J1458+1013-Y-4.0_LVL1_0001-00.fits filter=lfs diff=lfs merge=lfs -text +liger_iris_pipeline/tests/data/2024B-P123-008_Liger_IMG_SCI-J1458+1013-Y-10.0_LVL1_0001-00.fits filter=lfs diff=lfs merge=lfs -text +liger_iris_pipeline/tests/data/2024B-P123-008_IRIS_IMG1_SCI-J1458+1013-Y-4.0_LVL0_0001-00.fits filter=lfs diff=lfs merge=lfs -text +liger_iris_pipeline/tests/data/2024B-P123-008_IRIS_IMG1_SCI-J1458+1013-Y-4.0_LVL1_0001-00.fits filter=lfs diff=lfs merge=lfs -text diff --git a/docs/assign_wcs/main.rst b/docs/assign_wcs/main.rst index 73315d6..afef78f 100644 --- a/docs/assign_wcs/main.rst +++ b/docs/assign_wcs/main.rst @@ -28,8 +28,6 @@ See an example script to process a file with FITS WCS keywords in the header:: import liger_iris_pipeline import astropy.units as u - liger_iris_pipeline.monkeypatch_jwst_datamodels() - input_filename ="iris_sim_gc_filterKN3_fix.fits" - output = liger_iris_pipeline.assign_wcs.AssignWcsStep.call(input_filename) + output = liger_iris_pipeline.assign_wcs.AssignWCSStep.call(input_filename) print(output.meta.wcs([0,4096]*u.pix,[0,4096]*u.pix)) diff --git a/docs/available-steps.rst b/docs/available-steps.rst index 9efae95..e2d28f8 100644 --- a/docs/available-steps.rst +++ b/docs/available-steps.rst @@ -4,10 +4,9 @@ Available algorithms .. toctree:: :maxdepth: 2 - drsrop_pipeline.rst dq_init/index.rst background/index.rst - dark_current/index.rst + dark_subtraction/index.rst normalize/index.rst pipeline/index.rst flatfield/index.rst diff --git a/docs/background/index.rst b/docs/background/index.rst deleted file mode 100644 index f4c63ba..0000000 --- a/docs/background/index.rst +++ /dev/null @@ -1,14 +0,0 @@ -.. _background_step: - -============================ -Background Image Subtraction -============================ - -.. toctree:: - :maxdepth: 2 - - description.rst - arguments.rst - reference_files.rst - -.. 
automodapi:: liger_iris_pipeline.background diff --git a/docs/dark_current/arguments.rst b/docs/dark_subtraction/arguments.rst similarity index 100% rename from docs/dark_current/arguments.rst rename to docs/dark_subtraction/arguments.rst diff --git a/docs/dark_current/description.rst b/docs/dark_subtraction/description.rst similarity index 100% rename from docs/dark_current/description.rst rename to docs/dark_subtraction/description.rst diff --git a/docs/dark_current/index.rst b/docs/dark_subtraction/index.rst similarity index 70% rename from docs/dark_current/index.rst rename to docs/dark_subtraction/index.rst index cb6ba6d..2ce22ed 100644 --- a/docs/dark_current/index.rst +++ b/docs/dark_subtraction/index.rst @@ -1,4 +1,4 @@ -.. _dark_current_step: +.. _dark_step: ======================== Dark Current Subtraction @@ -12,4 +12,4 @@ Dark Current Subtraction arguments.rst -.. automodapi:: liger_iris_pipeline.dark_current +.. automodapi:: liger_iris_pipeline.dark_subtraction diff --git a/docs/dark_current/reference_files.rst b/docs/dark_subtraction/reference_files.rst similarity index 77% rename from docs/dark_current/reference_files.rst rename to docs/dark_subtraction/reference_files.rst index ba3a745..8a007cf 100644 --- a/docs/dark_current/reference_files.rst +++ b/docs/dark_subtraction/reference_files.rst @@ -6,4 +6,4 @@ The ``dark`` step uses a DARK reference file. The ``dark`` reference file by default is retrieved from the CRDS, to use instead a local file, specify the ``override_dark`` keyword -in the configuration file specifying a path to a :py:class:`IRISImageModel`. +in the configuration file specifying a path to a :py:class:`ImagerModel`. diff --git a/docs/index.rst b/docs/index.rst index 9fbf5e7..899bb6a 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -2,7 +2,7 @@ liger_iris_pipeline Documentation ********************************* -The Liger-IRIS Data Reduction System is based on the suite of packages from Space Telescope Science Institute +The Liger-IRIS Data Reduction System (DRS) is based on the suite of packages from Space Telescope Science Institute used to process data from JWST and NGRST. With ``stpipe`` and ``stdatamodels`` we can configure each step of a pipeline through one or more text based .INI style files, diff --git a/docs/pipeline/preprocess_flatfield.rst b/docs/pipeline/create_flatfield.rst similarity index 100% rename from docs/pipeline/preprocess_flatfield.rst rename to docs/pipeline/create_flatfield.rst diff --git a/docs/pipeline/image2.rst b/docs/pipeline/imager_stage2.rst similarity index 98% rename from docs/pipeline/image2.rst rename to docs/pipeline/imager_stage2.rst index 2c0b982..6a31dcf 100644 --- a/docs/pipeline/image2.rst +++ b/docs/pipeline/imager_stage2.rst @@ -4,7 +4,7 @@ image2: Stage 2 Imaging Processing ================================== :Config: image2.cfg -:Class: `~liger_iris_pipeline.pipeline.ProcessImagerL2Pipeline` +:Class: `~liger_iris_pipeline.pipeline.ImagerStage2Pipeline` Stage 2 imaging processing applies additional instrumental corrections and calibrations that result in a fully calibrated individual exposure. diff --git a/docs/pipeline/index.rst b/docs/pipeline/index.rst index 9e1aac3..18681ee 100644 --- a/docs/pipeline/index.rst +++ b/docs/pipeline/index.rst @@ -7,7 +7,7 @@ Pipeline Modules :maxdepth: 2 main.rst - image2.rst - preprocess_flatfield.rst + imager_stage2.rst + create_flatfield.rst .. 
automodapi:: liger_iris_pipeline.pipeline diff --git a/docs/drsrop_pipeline.rst b/docs/pipeline/stage1.rst similarity index 74% rename from docs/drsrop_pipeline.rst rename to docs/pipeline/stage1.rst index 6e401c5..6fe829e 100644 --- a/docs/drsrop_pipeline.rst +++ b/docs/pipeline/stage1.rst @@ -14,34 +14,13 @@ Non-linearity correction step corrects for the non-linear response of the detect Detector Readout Sampling ------------------------- The H4RG detecors are readout in non-destructive reads and sampling algorithms are used to estimate the accumulated electrons in the detector for an integration time. The sampling algorithms currently implemented in the pipeline are -- Correlated Double Sampling -- Multi Correlated Double Sampling +- (Multi)-Correlated Double Sampling - Up-the-Ramp Sampling -Requirements ------------- -The sampling algorithms use the drsrop_clib module. -https://github.com/oirlab/iris_readout - Running the Examples -------------------- -There is a example run in the liger_iris_pipeline/readout/tests directory. The sample ramp is given in the sample_ramp.fits. -sampling.cfg gives the configurations for the pipeline - -``sampling.cfg``: - -.. code-block:: ini - - name = "rop" - class = "liger_iris_pipeline.pipeline.ROPPipeline" - save_results = True - [steps] - [[nonlincorr]] - [[readoutsamp]] - mode='mcds' - -The sampling mode is set by the ``mode`` keyword which can be ``mcds`` or ``utr``. MCDS algorithm also requires the group number, the number of reads to be co-added. This is currently hardcoded in this version. - +There is a example run in the liger_iris_pipeline/readout directory. The sample ramp is given in the sample_ramp.fits. +sampling.cfg gives the configurations for the pipeline. Execute the pipeline from the command line ------------------------------------------ diff --git a/docs/background/arguments.rst b/docs/sky_subtraction/arguments.rst similarity index 100% rename from docs/background/arguments.rst rename to docs/sky_subtraction/arguments.rst diff --git a/docs/background/description.rst b/docs/sky_subtraction/description.rst similarity index 100% rename from docs/background/description.rst rename to docs/sky_subtraction/description.rst diff --git a/docs/sky_subtraction/index.rst b/docs/sky_subtraction/index.rst new file mode 100644 index 0000000..283fb55 --- /dev/null +++ b/docs/sky_subtraction/index.rst @@ -0,0 +1,14 @@ +.. _sky_subtraction_step: + +==================== +Sky Subtraction Step +==================== + +.. toctree:: + :maxdepth: 2 + + description.rst + arguments.rst + reference_files.rst + +.. automodapi:: liger_iris_pipeline.sky_subtraction diff --git a/docs/background/reference_files.rst b/docs/sky_subtraction/reference_files.rst similarity index 100% rename from docs/background/reference_files.rst rename to docs/sky_subtraction/reference_files.rst diff --git a/docs/subarrays.rst b/docs/subarrays.rst index 6fb29fb..61873c7 100644 --- a/docs/subarrays.rst +++ b/docs/subarrays.rst @@ -1,12 +1,11 @@ ********************* -Support for subarrays +Support for Subarrays ********************* Support for subarrays is currently only implemented for the imager and it supports datasets where only a custom subset of the 2D array is observed. 
-The keywords of :py:class:`IRISImageModel` which defines the parameters of the -subarray are:: +The keywords of :py:class:`ImagerModel` which defines the parameters of the subarray are:: model.meta.subarray.name = "CUSTOM" model.meta.subarray.id = 1 @@ -14,30 +13,30 @@ subarray are:: model.meta.subarray.ystart = ystart + 1 model.meta.subarray.xsize = xsize model.meta.subarray.ysize = ysize + model.meta.subarray.detysiz = detysiz + model.meta.subarray.detxsiz = detxsiz + model.meta.subarray.fastaxis = 0 + model.meta.subarray.slowaxis = 1 -Consider that following the FITS conventions the `xstart` and `ystart` keywords -are 1-based, therefore the default `xstart` is 1 and if you are slicing an -array in Python, you should add 1 to the keyword before saving it into the metadata. -`subarray.id` is saved into the FITS keyword `SUBARRID` and should be 0 for full -frames, 1 for the first subarray and so on. +Here the `xstart` and `ystart` keywords are converted from 0-based to 1-based indexing. +`xstart` and `ystart` are 1 by default. `subarray.id` is saved into the FITS keyword `SUBARRID` and should be 0 for full +frames, 1 for the first subarray and so on. The name of an entire frame is "FULL", whereas subarray names can be anything. -The name of an entire frame is "FULL". - -Subarrays and reference files +Subarrays and Reference Files ============================= Flat frames, darks and background files either in CRDS or using local overrides -can either be saved as subarrays -or can be saved as full frames. In case they are saved as full frames, after being -accessed they are sliced according to the metadata in the input subarray. +can either be saved as subarrays or can be saved as full frames. +In case they are saved as full frames, after being accessed they are sliced +according to the metadata in the input subarray. -Example usage +Example Usage ============= -As usage examples, check the notebooks or the ``test_image2.py`` script in the +As usage examples, check the notebooks or the ``test_imager_stage2.py`` script in the `unit tests folder in the repository `_ -Related steps +Related Steps ============= .. 
toctree:: diff --git a/liger_iris_pipeline/assign_wcs/assign_wcs.py b/liger_iris_pipeline/assign_wcs/assign_wcs.py index c0167c2..35910e2 100644 --- a/liger_iris_pipeline/assign_wcs/assign_wcs.py +++ b/liger_iris_pipeline/assign_wcs/assign_wcs.py @@ -64,5 +64,5 @@ def load_wcs(input_model, reference_files={}): wcs = WCS(pipeline) output_model.meta.wcs = wcs - output_model.meta.cal_step.assign_wcs = "COMPLETE" + return output_model diff --git a/liger_iris_pipeline/assign_wcs/assign_wcs_step.py b/liger_iris_pipeline/assign_wcs/assign_wcs_step.py index 4ca98c4..cdd85f2 100755 --- a/liger_iris_pipeline/assign_wcs/assign_wcs_step.py +++ b/liger_iris_pipeline/assign_wcs/assign_wcs_step.py @@ -20,6 +20,8 @@ class AssignWCSStep(LigerIRISStep): # eventually ['distortion' , 'specwcs', 'wavelengthrange'] reference_file_types = [] + class_alias = "assign_wcs" + def process(self, input, *args, **kwargs): reference_file_names = {} if isinstance(input, str): @@ -33,7 +35,7 @@ def process(self, input, *args, **kwargs): log.warning("assign_wcs expects ImageModel as input.") log.warning("Skipping assign_wcs step.") result = input_model.copy() - result.meta.cal_step.assign_wcs = "SKIPPED" + self.status = "SKIPPED" else: # Get reference files for reftype in self.reference_file_types: @@ -43,6 +45,7 @@ def process(self, input, *args, **kwargs): # Assign wcs result = load_wcs(input_model, reference_file_names) + self.status = "COMPLETE" # Close model if opened manually if isinstance(input, str): diff --git a/liger_iris_pipeline/associations/__init__.py b/liger_iris_pipeline/associations/__init__.py index 9ca5cb8..5ac54be 100644 --- a/liger_iris_pipeline/associations/__init__.py +++ b/liger_iris_pipeline/associations/__init__.py @@ -1,14 +1,16 @@ # https://jwst-pipeline.readthedocs.io/en/latest/jwst/associations/association_reference.html#ref-asn-core-methods from .association_base import LigerIRISAssociation -from .imager_level0 import ImagerL0Association -from .imager_level1 import ImagerL1Association +from .level0 import L0Association +from .level1 import L1Association +from .subarray import SubarrayAssociation from .utils import load_asn __all__ = [ "LigerIRISAssociation", - "ImagerL0Association", - "ImagerL1Association", + "L0Association", + "L1Association", + "SubarrayAssociation", "load_asn" ] diff --git a/liger_iris_pipeline/associations/association_base.py b/liger_iris_pipeline/associations/association_base.py index a51e041..79e71f0 100644 --- a/liger_iris_pipeline/associations/association_base.py +++ b/liger_iris_pipeline/associations/association_base.py @@ -56,12 +56,12 @@ def from_product(cls, product : dict): return asn @classmethod - def from_member(cls, filename : str): - input_model = datamodels.open(filename) + def from_member(cls, member : str | datamodels.LigerIRISDataModel): + input_model = datamodels.open(member) product = { "members": [ { - "expname": filename, + "expname": input_model.filename, "exptype": input_model.meta.exposure.type, }, ] diff --git a/liger_iris_pipeline/associations/imager_level0.py b/liger_iris_pipeline/associations/imager_level0.py deleted file mode 100644 index 67913b2..0000000 --- a/liger_iris_pipeline/associations/imager_level0.py +++ /dev/null @@ -1,11 +0,0 @@ -from . import LigerIRISAssociation - -__all__ = ['ImagerL0Association'] - - -class ImagerL0Association(LigerIRISAssociation): - """ - TODO: - Implement subarrays in the association. 
- """ - pass \ No newline at end of file diff --git a/liger_iris_pipeline/associations/level0.py b/liger_iris_pipeline/associations/level0.py new file mode 100644 index 0000000..69ede39 --- /dev/null +++ b/liger_iris_pipeline/associations/level0.py @@ -0,0 +1,6 @@ +from . import LigerIRISAssociation + +__all__ = ['L0Association'] + +class L0Association(LigerIRISAssociation): + pass \ No newline at end of file diff --git a/liger_iris_pipeline/associations/imager_level1.py b/liger_iris_pipeline/associations/level1.py similarity index 64% rename from liger_iris_pipeline/associations/imager_level1.py rename to liger_iris_pipeline/associations/level1.py index 5f5bcc6..920fd3c 100644 --- a/liger_iris_pipeline/associations/imager_level1.py +++ b/liger_iris_pipeline/associations/level1.py @@ -2,9 +2,9 @@ __all__ = ['ImagerL1Association'] -class ImagerL1Association(LigerIRISAssociation): +class L1Association(LigerIRISAssociation): """ - IRIS Imager Level 1 Association + Imager Level 1 Association TODO: Implement subarrays in the association. """ diff --git a/liger_iris_pipeline/associations/subarray.py b/liger_iris_pipeline/associations/subarray.py new file mode 100644 index 0000000..f2bc17e --- /dev/null +++ b/liger_iris_pipeline/associations/subarray.py @@ -0,0 +1,9 @@ +from . import LigerIRISAssociation + +__all__ = ['SubarrayAssociation'] + +class SubarrayAssociation(LigerIRISAssociation): + """ + Subarray Association for any level. + """ + pass \ No newline at end of file diff --git a/liger_iris_pipeline/base_step.py b/liger_iris_pipeline/base_step.py index 9853427..fb9a4f5 100644 --- a/liger_iris_pipeline/base_step.py +++ b/liger_iris_pipeline/base_step.py @@ -1,14 +1,26 @@ from functools import wraps +from collections.abc import Sequence import warnings +import copy +import gc + +import stpipe.utilities +from . import datamodels +from astropy.io import fits +from pathlib import Path +import stpipe.log import os +from typing import Self +import yaml +import stpipe +from stpipe import cmdline from stpipe import Step from stpipe import config_parser from . import datamodels from . import __version__ -from jwst.lib.suffix import remove_suffix from stpipe import crds_client __all__ = [ @@ -17,71 +29,118 @@ class LigerIRISStep(Step): - # NOTE: This is kind of a hack, change if possible - spec = """ - output_ext = string(default='.fits') # Output file type - """ + exclude_spec = [ + "pre_hooks", "post_hooks", + "output_use_index", "output_use_model", + "suffix", "search_output_file", "input_dir", 'output_ext', + 'steps' # Make spec only contain config for THIS class, not substeps + ] - @classmethod - def call(cls, *args, return_step : bool = False, **kwargs): + #spec = """ + # output_dir = str(default=None) # Directory path for output files + #""" + + def __init__( + self, + config_file : str | None = None, + **kwargs + ): """ - Hack to allow the pipeline to return the step or pipeline object that is created + Create a `Step` instance. + Configuration is determined according to: + 1. Class's spec object + 2. config_file + 3. kwargs """ - filename = None - if len(args) > 0: - filename = args[0] - config, config_file = cls.build_config(filename, **kwargs) + self.init_logger() + self._reference_files_used = [] - if "class" in config: - del config["class"] + # TODO: Refactor this back into stpipe Step classmethods. 
- if "logcfg" in config: - try: - self.log.load_configuration(config["logcfg"]) - except Exception as e: - raise RuntimeError( - f"Error parsing logging config {config['logcfg']}" - ) from e - del config["logcfg"] + # Load config for this Step (not a pipeline) + self.config_file = config_file - name = config.get("name", None) - instance = cls.from_config_section(config, name=name, config_file=config_file) + if self.config_file is not None: + config = config_parser.load_config_file(config_file) + else: + config = config_parser.ConfigObj() + + # Parse the config from the spec and any provided kwargs + spec = self.load_spec_file() + kwargs = config_parser.config_from_dict( + kwargs, + spec, + root_dir=None, + allow_missing=False + ) - result = instance.run(*args) + # Merge the spec with the config + config.merge(kwargs) - if return_step: - return result, instance - else: - return result + # Set the config parameters as member variables + for key, val in config.items(): + if key not in ("class", "steps", "config_file"): + _val = self.parse_config_kwarg(key, val, spec) + setattr(self, key, _val) + + def process(self, *args, **kwargs): + """ + This is where real work happens. Every Step subclass has to + override this method. The default behaviour is to raise a + NotImplementedError exception. + """ + raise NotImplementedError(f"Class {self.__class__} does not implement instance method `process`.") + + # @classmethod + # def call(cls, input, return_step : bool = False, config_file : str | None = None, **kwargs): + # """ + # Override call so the Pipeline or Step instance is optionally returned. + + # Parameters: + # input (str | LigerIRISDataModel | list[str] | list[LigerIRISDataModel]): + # 1. filename of datamodel + # 2. filename of ASN + # 3. datamodel + # 4. list of filenames + # 5. 
list of datamodels + # """ + # instance = cls(config_file=config_file) + # result = instance.run(input, **kwargs) + + # if return_step: + # return result, instance + # else: + # return result @classmethod def _datamodels_open(cls, init, **kwargs): return datamodels.open(init, **kwargs) - def finalize_result(self, result, reference_files_used): - if isinstance(result, datamodels.LigerIRISDataModel): - result.meta.calibration_software_revision = __version__ - - if len(reference_files_used) > 0: - for ref_name, filename in reference_files_used: - if hasattr(result.meta.ref_file, ref_name): - getattr(result.meta.ref_file, ref_name).name = filename - result.meta.ref_file.crds.sw_version = crds_client.get_svn_version() - result.meta.ref_file.crds.context_used = crds_client.get_context_used(result.crds_observatory) - if self.parent is None: - self.log.info(f"Results used CRDS context: {result.meta.ref_file.crds.context_used}") + def finalize_result(self, result : datamodels.LigerIRISDataModel, reference_files_used : dict[str, str]): + result.meta.drs_version = __version__ + from .pipeline import LigerIRISPipeline + if not isinstance(self, LigerIRISPipeline): + if hasattr(result.meta.drs_step, f"{self.class_alias}"): + setattr(result.meta.drs_step, f"{self.class_alias}", self.status) + else: + self.log.warning(f"Could not update status for {result.meta.drs_step}.{self.class_alias} in datamodel.") + # Set references files used + if len(reference_files_used) > 0: + for ref_name, filename in reference_files_used: + if hasattr(result.meta.ref_file, ref_name): + getattr(result.meta.ref_file, ref_name).name = filename + + # Set CRDS context and software version + result.meta.ref_file.crds.sw_version = crds_client.get_svn_version() + result.meta.ref_file.crds.context_used = crds_client.get_context_used(result.crds_observatory) - def remove_suffix(self, name): - return remove_suffix(name) + if self.parent is None: + self.log.info(f"Results used CRDS context: {result.meta.ref_file.crds.context_used}") - @wraps(Step.run) - def run(self, *args, **kwargs): - result = super().run(*args, **kwargs) - if not self.parent: - self.log.info(f"Results used liger_iris_pipeline version: {__version__}") - return result + # Reset status + self.status = None @wraps(Step.__call__) def __call__(self, *args, **kwargs): @@ -93,69 +152,189 @@ def __call__(self, *args, **kwargs): ) return super().__call__(*args, **kwargs) - @classmethod - def build_config(cls, input, **kwargs): # noqa: A002 + def save_model( + self, model, + output_path : str | None = None, + output_dir : str | None = None + ): + """ + Saves the given model using the step/pipeline's naming scheme. """ - Build the ConfigObj to initialize a Step. - This does not call out to CRDS top determine the appropriate config. + if output_path: + output_path = model.save(output_path) + else: + if output_dir is None: + output_dir = self.output_dir + output_path = self.make_output_path(model, output_dir=self.output_dir) + output_path = model.save(output_path) + self.log.info(f"Saved model in {output_path}") - A Step config is built in the following order: - - Local parameter reference file - - Step keyword arguments + return output_path + + def run(self, input, *args, **kwargs): + """ + Run handles the generic setup and teardown that happens with the running of each step. + The real work that is unique to each step type is done in the `process` method. 
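        In outline: reference files are prefetched (unless the step is skipped), `process`
        is called, `finalize_result` records the step status and CRDS information on the
        result, and the result is optionally written out via `save_model`.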
- Parameters - ---------- - input : str or None - Input file + Args: + input (str | LigerIRISDataModel | list[str] | list[LigerIRISDataModel]): + 1. filename of datamodel + 2. filename of ASN + 3. datamodel + 4. ASN + 5. list of filenames + 6. list of datamodels + Returns: + result (LigerIRISDataModel | list[LigerIRISDataModel] | list[LigerIRISDataModel]): + The result(s) of the step. Steps can only return a single model, but Pipelines can return a list. + """ + gc.collect() + with stpipe.log.record_logs(formatter=self._log_records_formatter) as log_records: + self._log_records = log_records - kwargs : dict - Keyword arguments that specify Step parameters. + # Make generic log messages go to this step's logger + orig_log = stpipe.log.delegator.log + stpipe.log.delegator.log = self.log - Returns - ------- - config, config_file : ConfigObj, str - The configuration and the config filename. - """ - config = config_parser.ConfigObj() - - if "config_file" in kwargs: - config_file = kwargs["config_file"] - del kwargs["config_file"] - config_from_file = config_parser.load_config_file(str(config_file)) - config_parser.merge_config(config, config_from_file) - config_dir = os.path.dirname(config_file) - else: - config_file = None - config_dir = "" - - config_kwargs = config_parser.ConfigObj() - - # load and merge configuration files for each step they are provided: - steps = {} - if "steps" in kwargs: - for step, pars in kwargs["steps"].items(): - if "config_file" in pars: - step_config_file = os.path.join(config_dir, pars["config_file"]) - cfgd = config_parser.load_config_file(step_config_file) - if "name" in cfgd: - if cfgd["name"] != step: - raise ValueError( - "Step name from configuration file " - f"'{step_config_file}' does not match step " - "name in the 'steps' argument." - ) - del cfgd["name"] - cfgd.pop("class", None) - cfgd.update(pars) - steps[step] = cfgd + step_result = None + + # log Step or Pipeline parameters from top level only + if self.parent is None: + self.log.info( + "Step %s parameters are:%s", + self.name, + # Add an indent to each line of the YAML output + "\n " + + "\n ".join( + yaml.dump(self.get_pars(), sort_keys=False) + .strip() + # Convert serialized YAML types true/false/null to Python types + .replace(" false", " False") + .replace(" true", " True") + .replace(" null", " None") + .splitlines() + ), + ) + + # Main try block + try: + + # Update the params based on kwargs + pars = self.get_pars() + kwargs_process = copy.deepcopy(kwargs) + for k, v in kwargs.items(): + if k in pars: + setattr(self, k, v) + del kwargs_process[k] # Remaining are for process + + # Prefetch references + self._reference_files_used = [] + if not self.skip and self.prefetch_references: + self.prefetch(input) + + # Call process and catch signature error + if not self.skip: + try: + step_result = self.process(input, *args, **kwargs_process) + except TypeError as e: + if "process() takes exactly" in str(e): + raise TypeError( + "Incorrect number of arguments to step" + ) from e + raise else: - steps[step] = pars + self.log.info(f"Skipping step {self.name}") + + # Update meta information regardless of skip + if isinstance(step_result, Sequence): + for result in step_result: + self.finalize_result(result, self._reference_files_used) + else: + self.finalize_result(step_result, self._reference_files_used) + + self._reference_files_used = [] # Reset? + + # Save the results even if skipped since metadata is udpated. 
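            # Usage sketch (the input path is hypothetical; not part of this change),
            # assuming save_results=True was passed at construction:
            #
            #     step = NormalizeStep(save_results=True, method="median")
            #     result = step.run("sci_lvl1.fits")
            #
            # The returned model is then also written to disk below via save_model().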
+ if self.save_results: + if isinstance(step_result, Sequence): + for result in step_result: + self.save_model(result, output_dir=self.output_dir) + else: + self.save_model(step_result, output_dir=self.output_dir) + + if not self.skip: + self.log.info(f"Step {self.name} done") + finally: + stpipe.log.delegator.log = orig_log + + return step_result + + def init_logger(self, config : config_parser.ConfigObj | None = None): + """ + Initialize logging for the step. + Config ignored for now. + """ + # A list of logging.LogRecord emitted to the stpipe root logger + # during the most recent call to Step.run. + self._log_records = [] - kwargs = {k: v for k, v in kwargs.items() if k != "steps"} - if steps: - kwargs["steps"] = steps + # Namespace for the logger + self.name = self.__class__.__name__ + self.qualified_name = f"{stpipe.log.STPIPE_ROOT_LOGGER}.{self.name}" + self.parent = None + self.log = stpipe.log.getLogger(self.qualified_name) + self.log.setLevel(stpipe.log.logging.DEBUG) + self.log.info(f"{self.__class__.__name__} instance created.") + + @staticmethod + def _make_output_path(step : Self, model : datamodels.LigerIRISDataModel, output_dir : str | None): + """ + Generate the output path for the given model. + """ + if output_dir is None: + if step.output_dir is not None: + output_dir = step.output_dir + elif model._filename is not None: + output_dir = os.path.split(os.path.abspath(model._filename))[0] + else: + raise ValueError("No output directory provided and no default found.") + output_filename = model.generate_filename() + output_path = os.path.join(output_dir, output_filename) - config_parser.merge_config(config_kwargs, kwargs) - config_parser.merge_config(config, config_kwargs) + return output_path - return config, config_file \ No newline at end of file + @classmethod + def load_spec_file(cls, preserve_comments=stpipe.utilities._not_set): + spec = super().load_spec_file(preserve_comments=preserve_comments) + for k in cls.exclude_spec: + if k in spec: + del spec[k] + return spec + + def get_pars(self, full_spec=True): + pars_dict = super().get_pars(full_spec=full_spec) + for k in self.exclude_spec: + if k in pars_dict: + del pars_dict[k] + return pars_dict + + @staticmethod + def parse_config_kwarg(key : str, val : str | None, spec): + """ + TODO: Implement spec validation, defaults are grabbed above. + """ + if not isinstance(val, str) or val is None: + return val + if val.lower() == "true": + return True + if val.lower() == "false": + return False + try: + return int(val) + except ValueError: + pass + try: + return float(val) + except ValueError: + pass + return val \ No newline at end of file diff --git a/liger_iris_pipeline/dark_subtraction/dark_step.py b/liger_iris_pipeline/dark_subtraction/dark_step.py index 9585832..d6c7312 100644 --- a/liger_iris_pipeline/dark_subtraction/dark_step.py +++ b/liger_iris_pipeline/dark_subtraction/dark_step.py @@ -1,7 +1,7 @@ #from .. import datamodels from ..base_step import LigerIRISStep -from ..datamodels import DarkModel from . import dark_sub +from .. import datamodels from ..utils.subarray import get_subarray_model @@ -16,41 +16,40 @@ class DarkSubtractionStep(LigerIRISStep): """ spec = """ - dark_output = output_file(default = None) # Dark model subtracted + dark_output_dir = string(default = None) # Path to save the ref dark from CRDS. 
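    # Example override in a config file (hypothetical path): dark_output_dir = /data/ref_darks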
""" reference_file_types = ["dark"] + class_alias = "dark_sub" def process(self, input): # Open the input data model - with self.open_model(input) as input_model: + with datamodels.open(input) as input_model: # Get the name of the dark reference file to use self.dark_filename = self.get_reference_file(input_model, "dark") self.log.info("Using DARK reference file %s", self.dark_filename) # Check for a valid reference file - if self.dark_filename == "N/A": - self.log.warning("No DARK reference file found") - self.log.warning("Dark current step will be skipped") - result = input_model.copy() - result.meta.cal_step.dark = "SKIPPED" + if self.dark_filename == "N/A" or self.dark_filename is None: + self.log.warning("No DARK reference file found, skipping dark subtraction") + self.status = "SKIPPED" return result - # Create name for the intermediate dark, if desired. - dark_output = self.dark_output - if dark_output is not None: - dark_output = self.make_output_path( - None, basepath=dark_output, ignore_use_model=True - ) - # Open the dark ref file data model - dark_model = DarkModel(self.dark_filename) + dark_model = datamodels.DarkModel(self.dark_filename) dark_model = get_subarray_model(input_model, dark_model) # Do the dark correction - result = dark_sub.do_correction(input_model, dark_model, dark_output) + result = dark_sub.subtract_dark(input_model, dark_model) + + # Save the dark ref model + if self.dark_output_dir is not None: + # TODO: HERE !!!! Make function make_output_path. Should be easy with correct datamodels + dark_path = self.make_output_path(dark_model, output_dir=self.dark_output_dir) + dark_model.save(dark_path) dark_model.close() + self.status = "COMPLETE" - return result + return result \ No newline at end of file diff --git a/liger_iris_pipeline/dark_subtraction/dark_sub.py b/liger_iris_pipeline/dark_subtraction/dark_sub.py index 8eecb66..cbf4f37 100644 --- a/liger_iris_pipeline/dark_subtraction/dark_sub.py +++ b/liger_iris_pipeline/dark_subtraction/dark_sub.py @@ -9,87 +9,20 @@ log.setLevel(logging.DEBUG) -def do_correction(input_model, dark_model, dark_output=None): +def subtract_dark(input_model, dark_model): """ - Short Summary - ------------- - Execute all tasks for Dark Current Subtraction + Subtract dark current data from the input science data model. - Parameters - ---------- - input_model: data model object - science data to be corrected - - dark_model: dark model object - dark data - - dark_output: string - file name in which to optionally save averaged dark data - - Returns - ------- - output_model: data model object - dark-subtracted science data + Args: """ - - # Save some data params for easy use later - instrument = input_model.meta.instrument.name - - # Replace NaN's in the dark with zeros - dark_model.data[np.isnan(dark_model.data)] = 0.0 - - output_model = subtract_dark(input_model, dark_model) - - # If the user requested to have the dark file saved, - # save the reference model as this file. This will - # ensure consistency from the user's standpoint - if dark_output is not None: - log.info("Writing dark current data to %s", dark_output) - dark_model.save(dark_output) - - output_model.meta.cal_step.dark_sub = "COMPLETE" - - return output_model - - -def subtract_dark(input, dark): - """ - Subtracts dark current data from science arrays, combines - error arrays in quadrature, and updates data quality array based on - DQ flags in the dark arrays. 
- - Parameters - ---------- - input: data model object - the input science data - - dark: dark model object - the dark current data - - Returns - ------- - output: data model object - dark-subtracted science data - - """ - - log.debug("subtract_dark: size=%d,%d", input.data.shape[0], input.data.shape[1]) - - # Create output as a copy of the input science data model - output = input.copy() - - # All other instruments have a single 2D dark DQ array - darkdq = dark.dq + output_model = input_model.copy() # Combine the dark and science DQ data - output.dq = np.bitwise_or(input.dq, darkdq) - - output.data -= dark.data + output_model.dq = np.bitwise_or(input_model.dq, dark_model.dq) - # combine the ERR arrays in quadrature - # NOTE: currently stubbed out until ERR handling is decided - # output.err[i,j] = np.sqrt( - # output.err[i,j]**2 + dark.err[j]**2) + # Subtract (e-) + output_model.data -= dark_model.data - return output + # Return + return output_model \ No newline at end of file diff --git a/liger_iris_pipeline/datamodels/container_old.py b/liger_iris_pipeline/datamodels/container_dev.py similarity index 100% rename from liger_iris_pipeline/datamodels/container_old.py rename to liger_iris_pipeline/datamodels/container_dev.py diff --git a/liger_iris_pipeline/datamodels/library.py b/liger_iris_pipeline/datamodels/library_dev.py similarity index 100% rename from liger_iris_pipeline/datamodels/library.py rename to liger_iris_pipeline/datamodels/library_dev.py diff --git a/liger_iris_pipeline/datamodels/model_base.py b/liger_iris_pipeline/datamodels/model_base.py index 142333e..2d78387 100644 --- a/liger_iris_pipeline/datamodels/model_base.py +++ b/liger_iris_pipeline/datamodels/model_base.py @@ -3,6 +3,7 @@ from astropy.time import Time from astropy.io import fits +from datetime import datetime from stdatamodels import DataModel __all__ = ["LigerIRISDataModel"] @@ -35,9 +36,11 @@ def __init__(self, init=None, instrument : str | None = None, **kwargs): super().__init__(init=init, **kwargs) if isinstance(init, str | Path): - self.filename = str(init) + self._filename = str(init) + elif isinstance(init, fits.HDUList): + self._filename = init.filename() else: - self.filename = None + self._filename = None def set_schema_from_instrument(self, instrument : str): s = self.schema_url.rsplit('/', 1) @@ -100,19 +103,11 @@ def on_save(self, init): super().on_save(init) if self.meta.filename is None and isinstance(init, str | Path): self.meta.filename = str(os.path.basename(init)) - elif isinstance(self.meta.filename, str | Path) and isinstance(init, str | Path): + elif isinstance(init, str | Path): if self.meta.filename != str(init): self.meta.filename = str(init) self.meta.date_created = Time.now().isot - @property - def filename(self): - return self._filename - - @filename.setter - def filename(self, value): - self.filename = value - @property def input_path(self): if self.filename is not None: @@ -133,12 +128,24 @@ def instrument(self): return self.meta.instrument.name @staticmethod - def generate_filename( + def _generate_filename( instrument : str, - obsid : str, - detector : str, obstype : str, level : int | str = 0, + sem_id : str | None, + program_number : str | None, obs_number : str | None, + detector : str, exptype : str, level : int | str = 0, exp : int | str = '0001', subarray : int | str | None = None ): + if sem_id is None: + t = datetime.now() + sem_id = str(t.year) + if t.month < 8: + sem_id += 'A' + else: + sem_id += 'B' + if program_number is None: + program_number = 'P001' + if 
obs_number is None: + obs_number = '001' if instrument.lower() == 'iris': instrument = 'IRIS' elif instrument.lower() == 'liger': @@ -151,13 +158,50 @@ def generate_filename( subarray = '-' + str(subarray).zfill(2) else: subarray = '-00' - return f"{obsid}_{instrument}_{detector.upper()}_{obstype}_LVL{int(level)}_{exp}{subarray}.fits" + return f"{sem_id}-{program_number}-{obs_number}_{instrument}_{detector.upper()}_{exptype}_LVL{int(level)}_{exp}{subarray}.fits" + + def generate_filename( + self, + instrument : str | None = None, + sem_id : str | None = None, + program_number : str | None = None, obs_number : str | None = None, + detector : str | None = None, exptype : str | None = None, level : int | str | None = None, + exp : int | str | None = None, subarray : int | str | None = None + ): + instrument = instrument if instrument is not None else self.instrument + sem_id = sem_id if sem_id is not None else self.meta.program.sem_id + program_number = program_number if program_number is not None else self.meta.program.program_number + obs_number = obs_number if obs_number is not None else self.meta.program.obs_number + detector = detector if detector is not None else self.meta.instrument.detector + exptype = exptype if exptype is not None else self.meta.exposure.type + level = level if level is not None else self.meta.data_level + exp = exp if exp is not None else self.meta.exposure.number + subarray = subarray if subarray is not None else self.meta.subarray.id + + # Override exptype for now to include additional info for development + # NOTE: Remove this eventually + exptype += f'-{self.meta.target.name}' + f'-{self.meta.instrument.filter}' + f'-{self.meta.instrument.scale}' + + return self._generate_filename( + instrument=instrument, + sem_id=sem_id, program_number=program_number, obs_number=obs_number, + detector=detector, exptype=exptype, + level=level, exp=exp, subarray=subarray + ) def get_primary_array_name(self): return 'data' - @classmethod - def from_model(cls, input_model): - model = cls(instrument=input_model.instrument) - model.__dict__.update(input_model.__dict__) - return model \ No newline at end of file + # @classmethod + # def from_model(cls, input_model): + # model = cls(instrument=input_model.instrument) + # model.__dict__.update(input_model.__dict__) + # return model + + def copy(self, memo=None): + """ + Returns a deep copy of this model. 
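        A new instance is created for the same instrument, so the correct schema is
        selected, and the contents are then deep-copied in with `clone`.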
+ """ + result = self.__class__(instrument=self.instrument) + self.clone(result, self, deepcopy=True, memo=memo) + return result \ No newline at end of file diff --git a/liger_iris_pipeline/datamodels/referencefile.py b/liger_iris_pipeline/datamodels/referencefile.py index a7595c9..d8266a9 100644 --- a/liger_iris_pipeline/datamodels/referencefile.py +++ b/liger_iris_pipeline/datamodels/referencefile.py @@ -49,7 +49,7 @@ def print_err(self, message): @staticmethod - def generate_filename( + def _generate_filename( instrument : str, detector : str, reftype : str, date : str, version : str @@ -62,4 +62,22 @@ def generate_filename( instrument = 'Liger' else: raise ValueError(f"Unknown instrument {instrument}") - return f"{instrument}_{detector.upper()}_{reftype}_{date}_{version}.fits" \ No newline at end of file + return f"{instrument}_{detector.upper()}_{reftype}_{date}_{version}.fits" + + def generate_filename( + self, + instrument : str | None = None, + detector : str | None = None, + reftype : str | None = None, + date : str | None = None, + version : str | None = None + ): + instrument = instrument if instrument is not None else self.instrument + detector = detector if detector is not None else self.meta.instrument.detector + reftype = reftype if reftype is not None else self.meta.reftype + date = self.meta.date.replace(':', '').replace('-', '')[0:15] + version = self.meta.ref_version if self.meta.ref_version is not None else '0.0.1' + return self._generate_filename( + instrument=instrument, detector=detector, reftype=reftype, + date=date, version=version + ) \ No newline at end of file diff --git a/liger_iris_pipeline/datamodels/schemas/DarkModel.schema.yaml b/liger_iris_pipeline/datamodels/schemas/DarkModel.schema.yaml index 1466400..9023065 100644 --- a/liger_iris_pipeline/datamodels/schemas/DarkModel.schema.yaml +++ b/liger_iris_pipeline/datamodels/schemas/DarkModel.schema.yaml @@ -3,7 +3,7 @@ $schema: "http://stsci.edu/schemas/fits-schema/fits-schema" id: "https://oirlab.github.io/schemas/DarkModel.schema" allOf: -- $ref: core.schema +- $ref: ReferenceFileModel.schema - type: object properties: data: diff --git a/liger_iris_pipeline/datamodels/schemas/FlatModel.schema.yaml b/liger_iris_pipeline/datamodels/schemas/FlatModel.schema.yaml index ac1198b..cf952df 100644 --- a/liger_iris_pipeline/datamodels/schemas/FlatModel.schema.yaml +++ b/liger_iris_pipeline/datamodels/schemas/FlatModel.schema.yaml @@ -3,7 +3,7 @@ $schema: "http://stsci.edu/schemas/fits-schema/fits-schema" id: "https://oirlab.github.io/schemas/FlatModel.schema" allOf: -- $ref: core.schema +- $ref: ReferenceFileModel.schema - type: object properties: data: diff --git a/liger_iris_pipeline/datamodels/schemas/NonlinearReadoutParametersModel.schema.yaml b/liger_iris_pipeline/datamodels/schemas/NonlinearReadoutParametersModel.schema.yaml index 38637a0..532a3fe 100644 --- a/liger_iris_pipeline/datamodels/schemas/NonlinearReadoutParametersModel.schema.yaml +++ b/liger_iris_pipeline/datamodels/schemas/NonlinearReadoutParametersModel.schema.yaml @@ -3,7 +3,7 @@ $schema: "http://stsci.edu/schemas/fits-schema/fits-schema" id: "https://oirlab.github.io/schemas/NonlinearReadoutParametersModel.schema" allOf: -- $ref: core.schema +- $ref: ReferenceFileModel.schema - type: object properties: coeffs: diff --git a/liger_iris_pipeline/datamodels/schemas/ReferenceFileModel.schema.yaml b/liger_iris_pipeline/datamodels/schemas/ReferenceFileModel.schema.yaml index 242c040..09be8be 100644 --- 
a/liger_iris_pipeline/datamodels/schemas/ReferenceFileModel.schema.yaml +++ b/liger_iris_pipeline/datamodels/schemas/ReferenceFileModel.schema.yaml @@ -3,4 +3,5 @@ $schema: "http://stsci.edu/schemas/fits-schema/fits-schema" id: "https://oirlab.github.io/schemas/ReferenceFileModel.schema" allOf: -- $ref: core.schema \ No newline at end of file +- $ref: core.schema +- $ref: reference_file.schema \ No newline at end of file diff --git a/liger_iris_pipeline/datamodels/schemas/common/core.schema.yaml b/liger_iris_pipeline/datamodels/schemas/common/core.schema.yaml index 1ca721f..8a5771e 100644 --- a/liger_iris_pipeline/datamodels/schemas/common/core.schema.yaml +++ b/liger_iris_pipeline/datamodels/schemas/common/core.schema.yaml @@ -13,7 +13,7 @@ allOf: # NOTE: We drop observation schema from jwst - $ref: barycenter.schema - $ref: subarray.schema # REMIND ME: Why do we have subarray and subarray_map in the metadata? - $ref: subarray_map.schema -- $ref: cal_steps.schema +- $ref: drs_steps.schema type: object properties: meta: diff --git a/liger_iris_pipeline/datamodels/schemas/common/cal_steps.schema.yaml b/liger_iris_pipeline/datamodels/schemas/common/drs_steps.schema.yaml similarity index 50% rename from liger_iris_pipeline/datamodels/schemas/common/cal_steps.schema.yaml rename to liger_iris_pipeline/datamodels/schemas/common/drs_steps.schema.yaml index 2459499..425e075 100644 --- a/liger_iris_pipeline/datamodels/schemas/common/cal_steps.schema.yaml +++ b/liger_iris_pipeline/datamodels/schemas/common/drs_steps.schema.yaml @@ -1,21 +1,37 @@ %YAML 1.1 --- $schema: "http://stsci.edu/schemas/fits-schema/fits-schema" -id: "https://oirlab.github.io/schemas/cal_steps.schema" +id: "https://oirlab.github.io/schemas/drs_steps.schema" type: object properties: meta: type: object properties: - cal_step: - title: "Calibration Steps performed by the DRS" + drs_step: + title: "Steps performed by the DRS" type: object properties: # NOTE: This is actually all of the steps, but typically every step corresponds to a calibration step or file # NOTE: S_ is for Step - title: Calibration step information + # These map 1-1 with the calibration steps in the DRS. Pipelines are not supported. + title: DRS step information. 
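      # Each property below holds the status string written by the corresponding step
      # (e.g. "COMPLETE" or "SKIPPED") and is persisted under the listed FITS keyword.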
type: object properties: + dq_init: + title: Data quality initialization + type: string + fits_keyword: S_DQINIT + blend_table: True + fit_ramp: + title: Ramp fitting + type: string + fits_keyword: S_RAMPFI + blend_table: True + nonlincorr: + title: Nonlinear readout correction + type: string + fits_keyword: S_NLCORR + blend_table: True assign_wcs: title: Assign World Coordinate System type: string @@ -26,10 +42,10 @@ properties: type: string fits_keyword: S_MTWCS blend_table: True - sky_back_sub: - title: Background subtraction + sky_sub: + title: Sky subtraction type: string - fits_keyword: S_SBKDSB + fits_keyword: S_SKYSUB blend_table: True bias: title: Bias subtraction @@ -51,13 +67,33 @@ properties: type: string fits_keyword: S_GANSCL blend_table: True - photom: - title: Photometric Calibration + fluxcal: + title: Flux Calibration + type: string + fits_keyword: S_FLUXCL + blend_table: True + resample: + title: Flux Calibration type: string - fits_keyword: S_PHOTOM + fits_keyword: S_RESAMP blend_table: True wavecal: title: Wavelength calibration type: string fits_keyword: S_WAVCAL blend_table: True + parse_subarrays: + title: Parse subarrays + type: string + fits_keyword: S_PARSSA + blend_table: True + merge_subarrays: + title: Merge subarrays + type: string + fits_keyword: S_MERGSA + blend_table: True + normalize: # TODO: UPDATE THIS for 1D and 2D + title: Normalize data + type: string + fits_keyword: S_NORMAL + blend_table: True diff --git a/liger_iris_pipeline/datamodels/schemas/common/reference_file.schema.yaml b/liger_iris_pipeline/datamodels/schemas/common/reference_file.schema.yaml index 7c735d2..2bb15b2 100644 --- a/liger_iris_pipeline/datamodels/schemas/common/reference_file.schema.yaml +++ b/liger_iris_pipeline/datamodels/schemas/common/reference_file.schema.yaml @@ -15,7 +15,7 @@ properties: title: The pedigree of the reference file type: string fits_keyword: PEDIGREE - version: + ref_version: title: The version number of the reference file type: string fits_keyword: REFVER \ No newline at end of file diff --git a/liger_iris_pipeline/datamodels/schemas/iris/tmt_program.schema.yaml b/liger_iris_pipeline/datamodels/schemas/iris/tmt_program.schema.yaml index 44d6af5..aac4f6e 100644 --- a/liger_iris_pipeline/datamodels/schemas/iris/tmt_program.schema.yaml +++ b/liger_iris_pipeline/datamodels/schemas/iris/tmt_program.schema.yaml @@ -21,18 +21,18 @@ properties: type: string fits_keyword: PROGPI blend_table: True - category: - title: Program category + sem_id: + title: Semester ID type: string - fits_keyword: PROGCAT + fits_keyword: SEMID blend_table: True - sub_category: - title: Program sub-category + program_number: + title: Program number in the semester type: string - fits_keyword: PROGSCAT + fits_keyword: PROGNUM blend_table: True - id: - title: Program ID + obs_number: + title: Observation number in the semester type: string - fits_keyword: PROGID + fits_keyword: OBSNUM blend_table: True \ No newline at end of file diff --git a/liger_iris_pipeline/datamodels/schemas/liger/keck_program.schema.yaml b/liger_iris_pipeline/datamodels/schemas/liger/keck_program.schema.yaml index 6f1fda0..45c6100 100644 --- a/liger_iris_pipeline/datamodels/schemas/liger/keck_program.schema.yaml +++ b/liger_iris_pipeline/datamodels/schemas/liger/keck_program.schema.yaml @@ -21,18 +21,18 @@ properties: type: string fits_keyword: PROGPI blend_table: True - category: - title: Program category + sem_id: + title: Semester ID type: string - fits_keyword: PROGCAT + fits_keyword: SEMID blend_table: True - 
sub_category: - title: Program sub-category + program_number: + title: Program number in the semester type: string - fits_keyword: PROGSCAT + fits_keyword: PROGNUM blend_table: True - id: - title: Program ID + obs_number: + title: Observation number in the semester type: string - fits_keyword: PROGID + fits_keyword: OBSNUM blend_table: True \ No newline at end of file diff --git a/liger_iris_pipeline/datamodels/utils.py b/liger_iris_pipeline/datamodels/utils.py index 5c95a08..8b2b543 100644 --- a/liger_iris_pipeline/datamodels/utils.py +++ b/liger_iris_pipeline/datamodels/utils.py @@ -44,7 +44,8 @@ def open(init=None, memmap=False, **kwargs): # If init is already a datamodel, copy and return if isinstance(init, LigerIRISDataModel): - return init.__class__(init, **kwargs) + return init + #return init.__class__(init, **kwargs) # Convert path to string if isinstance(init, Path): diff --git a/liger_iris_pipeline/dq_init/dq_init_step.py b/liger_iris_pipeline/dq_init/dq_init_step.py index dfa5495..5f66108 100644 --- a/liger_iris_pipeline/dq_init/dq_init_step.py +++ b/liger_iris_pipeline/dq_init/dq_init_step.py @@ -17,7 +17,8 @@ class DQInitStep(LigerIRISStep): with the pixeldq (or dq) attribute of the input model. """ - reference_file_types = ['mask'] + reference_file_types = ['dq'] + class_alias = "dq_init" def process(self, input): """Perform the dq_init calibration step @@ -29,50 +30,27 @@ def process(self, input): Returns ------- - output_model : JWST datamodel - result JWST datamodel + LigerIRISDataModel : The output datamodel. """ - # Try to open the input as a regular RampModel - try: - input_model = datamodels.TMTRampModel(input) - - # Check to see if it's Guider raw data - if input_model.meta.exposure.type in dq_initialization.guider_list: - # Reopen as a GuiderRawModel - input_model.close() - input_model = datamodels.GuiderRawModel(input) - self.log.info("Input opened as GuiderRawModel") - - except (TypeError, ValueError): - # If the initial open attempt fails, - # try to open as a GuiderRawModel - try: - input_model = datamodels.GuiderRawModel(input) - self.log.info("Input opened as GuiderRawModel") - except (TypeError, ValueError): - self.log.error("Unexpected or unknown input model type") - except: - self.log.error("Can't open input") - raise + # TODO: Implement me # Retreive the mask reference file name self.mask_filename = self.get_reference_file(input_model, 'mask') self.log.info('Using MASK reference file %s', self.mask_filename) + self.status = "COMPLETE" # Check for a valid reference file if self.mask_filename == 'N/A': self.log.warning('No MASK reference file found') self.log.warning('DQ initialization step will be skipped') result = input_model.copy() - result.meta.cal_step.dq_init = 'SKIPPED' + self.status = "SKIPPED" return result - # Load the reference file - mask_model = datamodels.TMTMaskModel(self.mask_filename) - # Apply the step result = dq_initialization.correct_model(input_model, mask_model) + self.status = "COMPLETE" # Close the data models for the input and ref file input_model.close() diff --git a/liger_iris_pipeline/drsrop_clib/__init__.py b/liger_iris_pipeline/drsrop_clib/__init__.py deleted file mode 100644 index 827741e..0000000 --- a/liger_iris_pipeline/drsrop_clib/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from ._drsrop_clib import uptheramp_c,mcds_c,nonlin_c -__all__ = ["uptheramp_c,mcds_c,nonlin_c"] diff --git a/liger_iris_pipeline/drsrop_clib/_drsrop_clib.pyx b/liger_iris_pipeline/drsrop_clib/_drsrop_clib.pyx deleted file mode 100644 index 
7d7c45e..0000000 --- a/liger_iris_pipeline/drsrop_clib/_drsrop_clib.pyx +++ /dev/null @@ -1,20 +0,0 @@ -#cython: language_level=3 - -cdef extern from "../iris_readout/src/drsrop.c": - float* uptheramp (int* arr, int* time, int a, int b, int c) - float* mcds (int* arr, int* time, int a, int b, int c, int num_coadd) - float* nonlin_corr (int* arr, int* time, int a, int b, int c, int x0, int y0, float* c0, float* c1, float* c2, float* c3, float* c4) -import numpy as np -cimport numpy as np - -def uptheramp_c(int[:,:,:] arr_f, int[:] arr_time): - cdef float[:,:] results = uptheramp(&arr_f[0,0,0], &arr_time[0], arr_f.shape[0], arr_f.shape[1], arr_f.shape[2]) - return np.asarray(results) - -def mcds_c(int[:,:,:] arr_f, int[:] arr_time, int num_coadd): - cdef float[:,:] results = mcds(&arr_f[0,0,0], &arr_time[0], arr_f.shape[0], arr_f.shape[1], arr_f.shape[2], num_coadd) - return np.asarray(results) - -def nonlin_c(int[:,:,:] arr_f, int[:] arr_time, float[:,:] arr_c0, float[:,:] arr_c1, float[:,:] arr_c2, float[:,:] arr_c3, float[:,:] arr_c4): - cdef float[:,:,:] results = nonlin_corr(&arr_f[0,0,0], &arr_time[0], arr_f.shape[0], arr_f.shape[1], arr_f.shape[2],0,0, &arr_c0[0,0],&arr_c1[0,0],&arr_c2[0,0],&arr_c3[0,0],&arr_c4[0,0] ) - return np.asarray(results) diff --git a/liger_iris_pipeline/flatfield/flat_field.py b/liger_iris_pipeline/flatfield/flat_field.py index e17745c..68228b4 100644 --- a/liger_iris_pipeline/flatfield/flat_field.py +++ b/liger_iris_pipeline/flatfield/flat_field.py @@ -61,29 +61,9 @@ def do_flat_field(output_model, flat_model): flat_model : JWST data model data model containing flat-field """ - log.debug("Flat field correction ") - - any_updated = False # will set True if any flats applied - - # Check to see if flat data array is smaller than science data - if (output_model.data.shape[-1] > flat_model.data.shape[-1]) or ( - output_model.data.shape[-2] > flat_model.data.shape[-2] - ): - log.warning("Reference data array is smaller than science data") - log.warning("Step will be skipped") - - # Apply flat to all other models - else: - apply_flat_field(output_model, flat_model) - any_updated = True - - if any_updated: - output_model.meta.cal_step.flat_field = "COMPLETE" - else: - output_model.meta.cal_step.flat_field = "SKIPPED" - - + apply_flat_field(output_model, flat_model) + def apply_flat_field(science, flat): """Flat field the data and error arrays. 
diff --git a/liger_iris_pipeline/flatfield/flat_field_step.py b/liger_iris_pipeline/flatfield/flat_field_step.py index e5713fa..b2b57e6 100755 --- a/liger_iris_pipeline/flatfield/flat_field_step.py +++ b/liger_iris_pipeline/flatfield/flat_field_step.py @@ -13,6 +13,7 @@ class FlatFieldStep(LigerIRISStep): """ reference_file_types = ["flat"] + class_alias = "flat_field" def process(self, input): input_model = datamodels.open(input) @@ -30,6 +31,7 @@ def process(self, input): missing = True if missing: self.log.warning("Flat-field step will be skipped") + self.status = "SKIPPED" return self.skip_step(input_model) self.log.debug("Opening flat as FlatModel") @@ -41,6 +43,8 @@ def process(self, input): flat_model, ) + self.status = "COMPLETE" + # Close the inputs input_model.close() flat_model.close() diff --git a/liger_iris_pipeline/merge_subarrays/merge_subarrays.py b/liger_iris_pipeline/merge_subarrays/merge_subarrays.py index 1dc11be..2d80031 100644 --- a/liger_iris_pipeline/merge_subarrays/merge_subarrays.py +++ b/liger_iris_pipeline/merge_subarrays/merge_subarrays.py @@ -10,6 +10,8 @@ class MergeSubarraysStep(LigerIRISStep): """ """ + class_alias = "merge_subarrays" + def process(self, input): # TODO: Update input to be a Subarray ASN ? diff --git a/liger_iris_pipeline/normalize/normalize.py b/liger_iris_pipeline/normalize/normalize.py index 87f9e9f..e11068d 100644 --- a/liger_iris_pipeline/normalize/normalize.py +++ b/liger_iris_pipeline/normalize/normalize.py @@ -31,8 +31,6 @@ def do_correction(input_model, method="median"): output_model = apply_norm(input_model, method) - output_model.meta.cal_step.normalize = "COMPLETE" - return output_model diff --git a/liger_iris_pipeline/normalize/normalize_step.py b/liger_iris_pipeline/normalize/normalize_step.py index 13ab019..e4987b8 100644 --- a/liger_iris_pipeline/normalize/normalize_step.py +++ b/liger_iris_pipeline/normalize/normalize_step.py @@ -12,15 +12,16 @@ class NormalizeStep(LigerIRISStep): by its own mean, median or mode """ + class_alias = "normalize" + spec = """ method = string(default='median') """ def process(self, input): - if isinstance(input, str): - with datamodels.open(input) as input_model: - result = normalize.do_correction(input_model, method=self.method) - else: - result = normalize.do_correction(input, method=self.method) + with datamodels.open(input) as input_model: + result = normalize.do_correction(input_model, method=self.method) + + self.status = "COMPLETE" return result diff --git a/liger_iris_pipeline/parse_subarray_map/parse_subarray_map_step.py b/liger_iris_pipeline/parse_subarray_map/parse_subarray_map_step.py index 5058891..df0b415 100644 --- a/liger_iris_pipeline/parse_subarray_map/parse_subarray_map_step.py +++ b/liger_iris_pipeline/parse_subarray_map/parse_subarray_map_step.py @@ -37,6 +37,8 @@ class ParseSubarrayMapStep(LigerIRISStep): and data quality flag accordingly """ + class_alias = "parse_subarrays" + def process(self, input): if isinstance(input, str): @@ -59,8 +61,10 @@ def process(self, input): result.dq[result["subarr_map"] != 0], 2 ** SUBARRAY_DQ_BIT ) + self.status = "COMPLETE" else: self.log.info("No SUBARR_MAP extension found") result = input_model + self.status = "SKIPPED" return result diff --git a/liger_iris_pipeline/pipeline/__init__.py b/liger_iris_pipeline/pipeline/__init__.py index e35a11e..76c6741 100644 --- a/liger_iris_pipeline/pipeline/__init__.py +++ b/liger_iris_pipeline/pipeline/__init__.py @@ -1,4 +1,5 @@ from .stage1 import Stage1Pipeline from .imager_stage2 import 
ImagerStage2Pipeline from .create_flatfield import CreateFlatfield -__all__ = ["Stage1Pipeline", "ImagerStage2Pipeline", "CreateFlatfield"] +from .base_pipeline import LigerIRISPipeline +__all__ = ["Stage1Pipeline", "ImagerStage2Pipeline", "CreateFlatfield", "LigerIRISPipeline"] diff --git a/liger_iris_pipeline/pipeline/base_pipeline.py b/liger_iris_pipeline/pipeline/base_pipeline.py index efdf183..95048b9 100644 --- a/liger_iris_pipeline/pipeline/base_pipeline.py +++ b/liger_iris_pipeline/pipeline/base_pipeline.py @@ -2,18 +2,104 @@ from collections import defaultdict from stpipe import Pipeline +from .. import datamodels from ..base_step import LigerIRISStep from pathlib import Path +import stpipe from ..associations import LigerIRISAssociation, load_asn +from stpipe import config_parser + __all__ = [ "LigerIRISPipeline" ] -class LigerIRISPipeline(Pipeline, LigerIRISStep): +class LigerIRISPipeline(LigerIRISStep, Pipeline): default_association : LigerIRISAssociation = None - + + def __init__(self, config_file : str | None = None, **kwargs): + """ + Create a LigerIRISPipeline instance. + Configuration is determined according to: + 1. Class's spec object + 2. config_file + 3. kwargs + """ + self._reference_files_used = [] + self.init_logger() + + # Load config for this Pipeline. + self.config_file = config_file + if self.config_file is not None: + config = config_parser.load_config_file(config_file) + else: + config = config_parser.ConfigObj() + + # Load any subconfig files to load + if "steps" in config: + root_dir = os.path.dirname(self.config_file or "") + for step_alias, step_config in config["steps"].items(): + if "config_file" in step_config: + step_config = config_parser.load_config_file(os.path.join(root_dir, step_config["config_file"])) + config["steps"][step_alias].merge(step_config) + else: + config["steps"] = {} + + # Parse the config from the spec and any provided kwargs + pipeline_spec = self.load_spec_file() + pipeline_kwargs = config_parser.config_from_dict( + kwargs, + pipeline_spec, + root_dir=None, + allow_missing=False + ) + pipeline_kwargs.update(config) + config = pipeline_kwargs + + # Initilize the steps and their config + self.init_steps(config) + + # Merge the spec with the config + # TODO: MERGE THIS WITH BELOW + pars = self.get_pars() + for k, v in kwargs.items(): + if k in pars: + config[k] = v + + # Set the pipeline level config parameters as member variables + for key, val in config.items(): + if key not in ("class", "steps", "config_file"): + _val = self.parse_config_kwarg(key, val, pipeline_spec) + setattr(self, key, _val) + + + def init_steps(self, config : config_parser.ConfigObj): + if self.config_file is not None: + config_dir = os.path.dirname(self.config_file) + else: + config_dir = "" + for step_alias, _class in self.step_defs.items(): + if step_alias not in config["steps"]: + config["steps"][step_alias] = {} + step_config = config["steps"][step_alias] + if "config_file" in step_config: + step_config_file = os.path.join(config_dir, step_config["config_file"]) + else: + step_config_file = None + step_kwargs = {} + if step_config_file is not None: + step_config = config_parser.load_config_file(config_file=step_config_file) + step_kwargs.update(step_kwargs) + for key, val in step_config.items(): + if key not in ('class', 'config_file'): + step_kwargs[key] = val + new_step = _class( + config_file=step_config_file, + **step_kwargs + ) + setattr(self, step_alias, new_step) + def input_to_asn(self, input): """ Convert input to an association. 
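Editor's note on the reworked ``LigerIRISPipeline.__init__`` above: parameters are resolved in a fixed order (class spec, then ``config_file``, then keyword arguments), and each entry under ``[steps]`` may point at its own ``config_file``, which ``init_steps`` loads relative to the directory of the pipeline-level config. A hedged usage sketch mirroring the pattern used in the tests later in this diff; the directory, file names, member filenames, and ``exptype`` strings are placeholders, not part of the repository::

    from pathlib import Path
    import liger_iris_pipeline
    from liger_iris_pipeline.associations import L1Association

    work_dir = Path("/tmp/drs_example")  # hypothetical scratch directory
    work_dir.mkdir(parents=True, exist_ok=True)

    # Per-step config referenced from the pipeline config below.
    (work_dir / "dark_config.cfg").write_text(
        'class = "liger_iris_pipeline.DarkSubtractionStep"\n'
        'save_results = False\n'
    )
    (work_dir / "stage2.cfg").write_text(
        'class = "liger_iris_pipeline.ImagerStage2Pipeline"\n'
        'save_results = True\n'
        '[steps]\n'
        '  [[dark_sub]]\n'
        '    config_file = "dark_config.cfg"\n'
    )

    # Build an L1 association from science + sky exposures (placeholder filenames).
    asn = L1Association.from_product({
        "members": [
            {"expname": "2024B-P123-008_IRIS_IMG1_SCI-J1458+1013-Y-4.0_LVL1_0001-00.fits",
             "exptype": "SCIENCE"},
            {"expname": "2024B-P123-008_IRIS_IMG1_SKY-J1458+1013-Y-4.0_LVL1_0001-00.fits",
             "exptype": "SKY"},
        ],
    })

    pipeline = liger_iris_pipeline.ImagerStage2Pipeline(config_file=str(work_dir / "stage2.cfg"))
    results = pipeline.run(asn, output_dir=str(work_dir))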
@@ -37,8 +123,10 @@ def input_to_asn(self, input): asn = load_asn(input) else: asn = self.default_association.from_member(input) # DataModel file + elif isinstance(input, datamodels.LigerIRISDataModel): + asn = self.default_association.from_member(input) elif isinstance(input, dict): - asn = self.default_association.from_product(input) # Single product (dict) + asn = self.default_association.from_product(input) # Single product (dict): else: raise ValueError(f"Input type {type(input)} not supported.") diff --git a/liger_iris_pipeline/pipeline/create_flatfield.py b/liger_iris_pipeline/pipeline/create_flatfield.py index 8e73978..6613f5f 100644 --- a/liger_iris_pipeline/pipeline/create_flatfield.py +++ b/liger_iris_pipeline/pipeline/create_flatfield.py @@ -1,8 +1,11 @@ +import os +import copy + import liger_iris_pipeline.datamodels as datamodels from .base_pipeline import LigerIRISPipeline from ..dark_subtraction import DarkSubtractionStep from ..normalize import NormalizeStep -from liger_iris_pipeline.associations import ImagerL1Association +from liger_iris_pipeline.associations import L1Association __all__ = ["CreateFlatfield"] @@ -16,8 +19,7 @@ class CreateFlatfield(LigerIRISPipeline): normalize """ - # TODO: MAKE THIS GENERIC FOR IMAGER AND IFU DETECTORS - default_association = ImagerL1Association + default_association = L1Association # Define alias to steps step_defs = { @@ -37,6 +39,15 @@ def process(self, input): result = self.process_exposure_product(self.asn.products[0]) self.log.info("Finished Create Flatfield.") return result + + def get_output_dir(self, input_model : datamodels.LigerIRISDataModel): + if self.output_dir is not None: + return self.output_dir + else: + if input_model.filename is not None: + return os.path.abspath(input_model.filename) + else: + return '' # Process each exposure def process_exposure_product(self, exp_product : dict): @@ -47,21 +58,22 @@ def process_exposure_product(self, exp_product : dict): raw_flat_model = members_by_type["flat"][0] self.log.info(f"Processing {raw_flat_model}") input_model = datamodels.open(raw_flat_model) - input_model = self.dark_sub(input_model) - input_model = self.normalize(input_model) + input_model = self.dark_sub.run(input_model) + input_model = self.normalize.run(input_model) # To flat field model - flat_model = datamodels.FlatModel.from_model(input_model) - - # Save the results - if self.save_results: - output_file = datamodels.ReferenceFileModel.generate_filename( - instrument=flat_model.instrument, detector=flat_model.meta.instrument.detector, - reftype='FLAT', date=flat_model.meta.date, version='0.0.1' - ) - flat_model.save(output_file) - self.log.info(f"Saved {flat_model}") + # TODO: Generalize the conversion from ImagerModel -> FlatModel for other pipelines and move to DataModel class + flat_model = datamodels.FlatModel( + instrument=input_model.instrument, + data=input_model.data, err=input_model.err, dq=input_model.dq + ) + _meta = copy.deepcopy(input_model.meta.instance) + _meta.update(flat_model.meta.instance) + flat_model.meta = _meta + flat_model.meta.reftype = "FLAT" + flat_model.meta.pedigree = None + flat_model.meta.version = '0.0.1' - self.log.info(f"Finished processing {exp_product['name']}") + self.log.info(f"Finished processing {members_by_type['flat'][0]}") return flat_model diff --git a/liger_iris_pipeline/pipeline/imager_stage2.py b/liger_iris_pipeline/pipeline/imager_stage2.py index e6b95c7..9592dcb 100644 --- a/liger_iris_pipeline/pipeline/imager_stage2.py +++ 
b/liger_iris_pipeline/pipeline/imager_stage2.py @@ -1,4 +1,4 @@ -from ..associations import ImagerL1Association +from ..associations import L1Association from .base_pipeline import LigerIRISPipeline from liger_iris_pipeline import datamodels from ..parse_subarray_map import ParseSubarrayMapStep @@ -6,8 +6,8 @@ from ..flatfield import FlatFieldStep from ..assign_wcs import AssignWCSStep from ..sky_subtraction import SkySubtractionImagerStep -from jwst.photom import PhotomStep as JWSTPhotomStep -from jwst.resample import ResampleStep as JWSTResampleStep +#from jwst.photom import PhotomStep as JWSTPhotomStep +#from jwst.resample import ResampleStep as JWSTResampleStep __all__ = ["ImagerStage2Pipeline"] @@ -27,7 +27,7 @@ class ImagerStage2Pipeline(LigerIRISPipeline): ResampleStep (JWST) """ - default_association = ImagerL1Association + default_association = L1Association # Define alias to steps step_defs = { @@ -35,9 +35,9 @@ class ImagerStage2Pipeline(LigerIRISPipeline): "dark_sub": DarkSubtractionStep, "flat_field": FlatFieldStep, "sky_sub": SkySubtractionImagerStep, - "photom": JWSTPhotomStep, + #"fluxcal": JWSTPhotomStep, "assign_wcs": AssignWCSStep, - "resample": JWSTResampleStep, + #"resample": JWSTResampleStep, } def process(self, input): @@ -52,9 +52,6 @@ def process(self, input): results = [] for product in self.asn["products"]: result = self.process_exposure_product(product) - - # Save result - result.meta.filename = self.output_file results.append(result) self.log.info("ImagerStage2Pipeline completed") @@ -65,7 +62,7 @@ def process(self, input): def process_exposure_product(self, exp_product : dict): """Process an exposure product. - Parameters:w + Parameters: exp_product (dict): The exposure product. """ @@ -80,16 +77,20 @@ def process_exposure_product(self, exp_product : dict): input_model = datamodels.open(science) # Run remaining steps - input_model = self.parse_subarray_map(input_model) - input_model = self.dark_sub(input_model) - input_model = self.flat_field(input_model) + input_model = self.parse_subarray_map.run(input_model) + input_model = self.dark_sub.run(input_model) + input_model = self.flat_field.run(input_model) if len(members_by_type["sky"]) > 0: - input_model = self.sky_sub(input_model, members_by_type["sky"][0]) + sky_filename = members_by_type["sky"][0] + input_model = self.sky_sub.run(input_model, sky_filename) elif not self.sky_sub.skip: - self.log.warning(f"No sky background found for {input_model} but {self.sky_sub.__class__.__name__}.skip=False.") + self.log.warning(f"No sky background found for {input_model} but {self.sky_sub.__class__.__name__}.skip=False. 
Skipping Sky Subtraction") + + input_model = self.assign_wcs.run(input_model) + #input_model = self.fluxcal(input_model) - input_model = self.assign_wcs(input_model) - input_model = self.photom(input_model) + # Update the model level + input_model.meta.data_level = 2 self.log.info(f"Finished processing {input_model}") diff --git a/liger_iris_pipeline/pipeline/stage1.py b/liger_iris_pipeline/pipeline/stage1.py index 0aeeebc..14162c8 100644 --- a/liger_iris_pipeline/pipeline/stage1.py +++ b/liger_iris_pipeline/pipeline/stage1.py @@ -3,7 +3,7 @@ from collections import defaultdict from .base_pipeline import LigerIRISPipeline from liger_iris_pipeline import datamodels -from ..associations import ImagerL0Association +from ..associations import L0Association # step imports from ..readout import NonlinCorrectionStep, FitRampStep @@ -24,7 +24,7 @@ class Stage1Pipeline(LigerIRISPipeline): FitRampStep """ - default_association = ImagerL0Association + default_association = L0Association # Define aliases to steps step_defs = { diff --git a/liger_iris_pipeline/readout/fit_ramp_step.py b/liger_iris_pipeline/readout/fit_ramp_step.py index 6c7f056..5aebfb9 100644 --- a/liger_iris_pipeline/readout/fit_ramp_step.py +++ b/liger_iris_pipeline/readout/fit_ramp_step.py @@ -16,6 +16,8 @@ class FitRampStep(LigerIRISStep): num_coadd = integer(default=3) """ + class_alias = "fit_ramp" + def process(self, input): """ Step for ramp fitting @@ -38,4 +40,6 @@ def process(self, input): elif input_model.meta.instrument.mode == 'IFU': model_result = IFUImageModel(data=slopes, err=slopes_err, dq=np.all(input_model.dq, axis=(2, 3))) + self.status = "COMPLETE" + return model_result \ No newline at end of file diff --git a/liger_iris_pipeline/readout/nonlincorr_step.py b/liger_iris_pipeline/readout/nonlincorr_step.py index 9cbed9d..be49f63 100644 --- a/liger_iris_pipeline/readout/nonlincorr_step.py +++ b/liger_iris_pipeline/readout/nonlincorr_step.py @@ -13,6 +13,8 @@ class NonlinCorrectionStep(LigerIRISStep): reference_file_types = ["nonlincoeff"] + class_alias = "nonlincorr" + def process(self, input): """ Step for Nonlinearity correction @@ -37,4 +39,6 @@ def process(self, input): # Close the nonlinearity file nonlin_model.close() + self.status = "COMPLETE" + return model_result diff --git a/liger_iris_pipeline/sky_subtraction/sky_subtraction_imager_step.py b/liger_iris_pipeline/sky_subtraction/sky_subtraction_imager_step.py index 90b93aa..1c3df8a 100755 --- a/liger_iris_pipeline/sky_subtraction/sky_subtraction_imager_step.py +++ b/liger_iris_pipeline/sky_subtraction/sky_subtraction_imager_step.py @@ -13,28 +13,29 @@ class SkySubtractionImagerStep(LigerIRISStep): SkySubtractionImagerStep: Sky subtraction for imager from existing sky file. 
""" - def process(self, input, sky_bkg): + class_alias = "sky_sub" + + def process(self, input, sky_input): result = input.copy() with datamodels.open(input) as input_model, \ - datamodels.open(sky_bkg) as bkg_model: + datamodels.open(sky_input) as sky_model: # Get subarray model - bkg_model = get_subarray_model(input_model, bkg_model) + sky_model = get_subarray_model(input_model, sky_model) # Subtract the average background from the member - self.log.debug(f"Subtracting background from {input_model.meta.filename} with {bkg_model.meta.filename}") + self.log.debug(f"Subtracting background from {input_model.meta.filename} with {sky_model.meta.filename}") # Subtract the SCI arrays - result.data = input_model.data - bkg_model.data + result.data = input_model.data - sky_model.data # Error handling # ... # Combine the DQ flag arrays using bitwise OR - result.dq = np.bitwise_or(input_model.dq, bkg_model.dq) + result.dq = np.bitwise_or(input_model.dq, sky_model.dq) - # Close the average background image and update the step status - result.meta.cal_step.sky_back_sub = "COMPLETE" + self.status = "COMPLETE" return result diff --git a/liger_iris_pipeline/tests/test_create_flat.py b/liger_iris_pipeline/tests/test_create_flat.py index 7fdaa5a..adb8bd1 100644 --- a/liger_iris_pipeline/tests/test_create_flat.py +++ b/liger_iris_pipeline/tests/test_create_flat.py @@ -2,12 +2,12 @@ import numpy as np import liger_iris_pipeline from liger_iris_pipeline import datamodels +from liger_iris_pipeline.associations import L1Association from liger_iris_pipeline.tests.test_utils import add_meta_data def create_config(): conf = """ - name = "ImagerStage2Pipeline" - class = "liger_iris_pipeline.pipeline.CreateFlatfield" + class = "liger_iris_pipeline.CreateFlatfield" save_results = True [steps] @@ -20,8 +20,8 @@ class = "liger_iris_pipeline.pipeline.CreateFlatfield" def test_create_flat(tmp_path): - # Grab flat field - raw_flat_filename = str(tmp_path / "2024A-P123-044_IRIS_IMG1_FLAT-Y_LVL1_0001-00.fits") + # Create a simulated raw flat + raw_flat_model = datamodels.ImagerModel(instrument='IRIS', data=np.random.normal(loc=1, scale=0.01, size=(4096, 4096))) meta = { 'model_type' : 'ImagerModel', 'target.name': 'FLAT', @@ -40,31 +40,29 @@ def test_create_flat(tmp_path): 'instrument.filter' : 'Y', 'instrument.scale' : 0.004, } - - # Create a simulated raw flat - raw_flat_model = datamodels.ImagerModel(instrument='IRIS', data=np.random.normal(loc=1, scale=0.01, size=(4096, 4096))) add_meta_data(raw_flat_model, meta) + raw_flat_filename = str(tmp_path / "2024A-P123-044_IRIS_IMG1_FLAT-Y_LVL1_0001-00.fits") raw_flat_model.save(raw_flat_filename) # ASN - product ={ - "name": "Test", + asn = L1Association.from_product({ "members": [ { "expname": raw_flat_filename, "exptype": "flat", }, - ] - } + ], + }) # Create a temporary config file conf = create_config() - config_file = tmp_path / "test_config.cfg" + config_file = str(tmp_path / "test_config.cfg") with open(config_file, "w") as f: f.write(conf) # Initialize flatfield pipeline - flat_model, pipeline = liger_iris_pipeline.CreateFlatfield.call(product, config_file=config_file, return_step=True) + pipeline = liger_iris_pipeline.CreateFlatfield(config_file=config_file) + flat_model = pipeline.run(asn, output_dir=str(tmp_path)) # Open dark dark_model = datamodels.open(pipeline.dark_sub.dark_filename) diff --git a/liger_iris_pipeline/tests/test_dark.py b/liger_iris_pipeline/tests/test_dark.py index 56b9d4f..2c67b29 100644 --- a/liger_iris_pipeline/tests/test_dark.py +++ 
b/liger_iris_pipeline/tests/test_dark.py @@ -3,12 +3,25 @@ import liger_iris_pipeline from liger_iris_pipeline import datamodels -def test_dark_step(): - sci_L1_filename = "/Users/cale/Desktop/Liger_IRIS_Test_Data/IRIS/2024A-P123-044_IRIS_IMG1_SCI-J1458+1013-SIM-Y_LVL1_0001-00.fits" +def test_dark_step(tmp_path): + sci_L1_filename = "liger_iris_pipeline/tests/data/2024B-P123-008_IRIS_IMG1_SCI-J1458+1013-Y-4.0_LVL1_0001-00.fits" input_model = datamodels.open(sci_L1_filename) - step = liger_iris_pipeline.DarkSubtractionStep() + # For dev purposes, no config for real/final test + conf = """ + class = "liger_iris_pipeline.DarkSubtractionStep" + output_dir = "/Users/cale/Desktop/DRS_Testing/" + """ + config_file = str(tmp_path / "test_dark_config.cfg") + with open(config_file, "w") as f: + f.write(conf) + + step = liger_iris_pipeline.DarkSubtractionStep(config_file=config_file) step_output = step.run(sci_L1_filename) dark_model = datamodels.open(step.dark_filename) - np.testing.assert_allclose(step_output.data, input_model.data - dark_model.data) \ No newline at end of file + np.testing.assert_allclose(step_output.data, input_model.data - dark_model.data) + +# from pathlib import Path +# tmp_path = Path("/Users/cale/Desktop/DRS_Testing/") +# test_dark_step(tmp_path) \ No newline at end of file diff --git a/liger_iris_pipeline/tests/test_datamodels.py b/liger_iris_pipeline/tests/test_datamodels.py index 5134d4a..fb0e026 100644 --- a/liger_iris_pipeline/tests/test_datamodels.py +++ b/liger_iris_pipeline/tests/test_datamodels.py @@ -3,7 +3,7 @@ from liger_iris_pipeline import ImagerModel def test_load_liger_image(): - sci_L1_filename = "liger_iris_pipeline/tests/data/2024A-P123-044_Liger_IMG_SCI-J1458+1013-SIM-Y_LVL1_0001-00.fits" + sci_L1_filename = "liger_iris_pipeline/tests/data/2024B-P123-008_Liger_IMG_SCI-J1458+1013-Y-10.0_LVL1_0001-00.fits" input_model = ImagerModel(sci_L1_filename) assert input_model.meta.model_type == "ImagerModel" @@ -17,7 +17,7 @@ def test_load_liger_image(): assert input_model.data.shape == (2048, 2048) def test_load_iris_image(): - sci_L1_filename = "liger_iris_pipeline/tests/data/2024A-P123-044_IRIS_IMG1_SCI-J1458+1013-SIM-Y_LVL1_0001-00.fits" + sci_L1_filename = "liger_iris_pipeline/tests/data/2024B-P123-008_IRIS_IMG1_SCI-J1458+1013-Y-4.0_LVL1_0001-00.fits" input_model = ImagerModel(sci_L1_filename) assert input_model.meta.model_type == "ImagerModel" diff --git a/liger_iris_pipeline/tests/test_flat.py b/liger_iris_pipeline/tests/test_flat.py index 13dceed..b25c70d 100644 --- a/liger_iris_pipeline/tests/test_flat.py +++ b/liger_iris_pipeline/tests/test_flat.py @@ -4,7 +4,7 @@ def test_flat_step(): - sci_L1_filename = "liger_iris_pipeline/tests/data/2024A-P123-044_IRIS_IMG1_SCI-J1458+1013-SIM-Y_LVL1_0001-00.fits" + sci_L1_filename = "liger_iris_pipeline/tests/data/2024B-P123-008_IRIS_IMG1_SCI-J1458+1013-Y-4.0_LVL1_0001-00.fits" input_model = datamodels.open(sci_L1_filename) step = FlatFieldStep() diff --git a/liger_iris_pipeline/tests/test_imager_stage2.py b/liger_iris_pipeline/tests/test_imager_stage2.py index 92ab2d5..adca69c 100644 --- a/liger_iris_pipeline/tests/test_imager_stage2.py +++ b/liger_iris_pipeline/tests/test_imager_stage2.py @@ -1,25 +1,30 @@ # Imports import liger_iris_pipeline import liger_iris_pipeline.datamodels as datamodels +from liger_iris_pipeline.associations import L1Association import numpy as np import os +def create_dark_config(): + conf = """ + class = "liger_iris_pipeline.DarkSubtraction" + save_results = False + output_dir = 
"/Users/cale/Desktop/DRS_Testing2/" + """ + return conf + def create_config(): conf = """ - name = "ImagerStage2Pipeline" - class = "liger_iris_pipeline.pipeline.ImagerStage2Pipeline" + class = "liger_iris_pipeline.ImagerStage2Pipeline" save_results = True [steps] [[dark_sub]] + config_file = "dark_config.cfg" [[flat_field]] [[sky_sub]] [[assign_wcs]] skip = False - [[photom]] - skip = True - [[resample]] - skip = True """ return conf @@ -31,10 +36,18 @@ def test_imager_stage2(tmp_path): with open(config_file, "w") as f: f.write(conf) + # Create a temporary dark config file + conf = create_dark_config() + config_file_dark = str(tmp_path / "dark_config.cfg") + with open(config_file_dark, "w") as f: + f.write(conf) + # Association - sci_L1_filename = "liger_iris_pipeline/tests/data/2024A-P123-044_IRIS_IMG1_SCI-J1458+1013-SIM-Y_LVL1_0001-00.fits" - sky_bkg_L1_filename = 'liger_iris_pipeline/tests/data/2024A-P123-044_IRIS_IMG1_SKY-SIM-Y_LVL1_0001-00.fits' - product = { + sci_L1_filename = "liger_iris_pipeline/tests/data/2024B-P123-008_IRIS_IMG1_SCI-J1458+1013-Y-4.0_LVL1_0001-00.fits" + sky_bkg_L1_filename = "liger_iris_pipeline/tests/data/2024B-P123-008_IRIS_IMG1_SKY-J1458+1013-Y-4.0_LVL1_0001-00.fits" + + # ASN + asn = L1Association.from_product({ "members": [ { "expname": sci_L1_filename, @@ -45,17 +58,18 @@ def test_imager_stage2(tmp_path): "exptype": "SKY" } ] - } + }) # Create and call the pipeline object - results, pipeline = liger_iris_pipeline.ImagerStage2Pipeline.call(product, config_file=config_file, return_step=True) + pipeline = liger_iris_pipeline.ImagerStage2Pipeline(config_file=config_file) + results = pipeline.run(asn, output_dir=str(tmp_path)) model_result = results[0] # Manual L2 file with datamodels.open(pipeline.dark_sub.dark_filename) as dark_model, \ datamodels.open(pipeline.flat_field.flat_filename) as flat_model, \ - datamodels.open(product['members'][0]['expname']) as sci_model, \ - datamodels.open(product['members'][1]['expname']) as bkg_model: + datamodels.open(sci_L1_filename) as sci_model, \ + datamodels.open(sky_bkg_L1_filename) as bkg_model: ref_data = (sci_model.data - dark_model.data) / flat_model.data - bkg_model.data np.testing.assert_allclose(model_result.data, ref_data, rtol=1e-6) @@ -68,9 +82,21 @@ def test_imager_stage2_subarray(tmp_path): with open(config_file, "w") as f: f.write(conf) + # Create a temporary config file + conf = create_config() + config_file = str(tmp_path / "test_config.cfg") + with open(config_file, "w") as f: + f.write(conf) + + # Create a temporary dark config file + conf = create_dark_config() + config_file_dark = str(tmp_path / "dark_config.cfg") + with open(config_file_dark, "w") as f: + f.write(conf) + # Files - sci_L1_filename = 'liger_iris_pipeline/tests/data/2024A-P123-044_IRIS_IMG1_SCI-J1458+1013-SIM-Y_LVL1_0001-00.fits' - sky_bkg_L1_filename = 'liger_iris_pipeline/tests/data/2024A-P123-044_IRIS_IMG1_SKY-SIM-Y_LVL1_0001-00.fits' + sci_L1_filename = "liger_iris_pipeline/tests/data/2024B-P123-008_IRIS_IMG1_SCI-J1458+1013-Y-4.0_LVL1_0001-00.fits" + sky_bkg_L1_filename = "liger_iris_pipeline/tests/data/2024B-P123-008_IRIS_IMG1_SKY-J1458+1013-Y-4.0_LVL1_0001-00.fits" sci_L1_filename_subarray = str(tmp_path / os.path.basename(sci_L1_filename.replace('-00.fits', '-01.fits'))) # Load the science model @@ -101,7 +127,7 @@ def test_imager_stage2_subarray(tmp_path): input_model.save(sci_L1_filename_subarray) # ASN - product ={ + asn = L1Association.from_product({ "members": [ { "expname": sci_L1_filename_subarray, @@ -112,17 +138,22 @@ 
def test_imager_stage2_subarray(tmp_path): "exptype": "SKY" } ] - } + }) # Call pipeline with test ASN - results, pipeline = liger_iris_pipeline.ImagerStage2Pipeline.call(product, config_file=config_file, return_step=True) + pipeline = liger_iris_pipeline.ImagerStage2Pipeline(config_file=config_file) + results = pipeline.run(asn, output_dir=str(tmp_path)) model_result = results[0] # Manual L2 # Everntually update this to use a static result with datamodels.open(pipeline.dark_sub.dark_filename) as dark_model, \ datamodels.open(pipeline.flat_field.flat_filename) as flat_model, \ - datamodels.open(product['members'][0]['expname']) as sci_model, \ - datamodels.open(product['members'][1]['expname']) as bkg_model: + datamodels.open(sci_L1_filename_subarray) as sci_model, \ + datamodels.open(sky_bkg_L1_filename) as bkg_model: ref_data = (sci_model.data - dark_model.data[subarray_slice]) / flat_model.data[subarray_slice] - bkg_model.data[subarray_slice] - np.testing.assert_allclose(model_result.data, ref_data, rtol=1e-6) \ No newline at end of file + np.testing.assert_allclose(model_result.data, ref_data, rtol=1e-6) + + +from pathlib import Path +test_imager_stage2(Path("/Users/cale/Desktop/DRS_Testing/")) \ No newline at end of file diff --git a/liger_iris_pipeline/tests/test_utils.py b/liger_iris_pipeline/tests/test_utils.py index ab98764..5ade6b8 100644 --- a/liger_iris_pipeline/tests/test_utils.py +++ b/liger_iris_pipeline/tests/test_utils.py @@ -44,6 +44,7 @@ def add_ifu_wcs_axes(ra : float, dec : float, size : tuple[int, int], scale : fl return meta def get_default_metadata(): + meta = {} # CORE @@ -94,6 +95,7 @@ def get_default_metadata(): meta['exposure.number'] = 1 meta['exposure.sequence_tot'] = 1 meta['exposure.readmode'] = 'DEFAULT' + return meta def get_default_liger_metadata(): @@ -122,9 +124,13 @@ def add_meta_data(model : datamodels.LigerIRISDataModel, meta : dict): # Instrument if meta['instrument.name'] == 'Liger': - meta.update(get_default_liger_metadata()) + _meta = get_default_liger_metadata() + _meta.update(meta) + meta = _meta elif meta['instrument.name'] == 'IRIS': - meta.update(get_default_iris_metadata()) + _meta = get_default_iris_metadata() + _meta.update(meta) + meta = _meta # Time time = Time(meta['exposure.jd_start'], format='jd') @@ -147,7 +153,11 @@ def add_meta_data(model : datamodels.LigerIRISDataModel, meta : dict): elif hasattr(model, 'data') and "IFU" in model.__class__.__name__: meta.update(add_ifu_wcs_axes(ra=meta['target.ra'], dec=meta['target.dec'], size=model.data.shape, scale=meta['instrument.scale']), dw=1) - # Dict to object + merge_model_meta(model, meta) + + return model + +def merge_model_meta(model, meta): for key, value in meta.items(): attrs = key.split('.') target = model.meta @@ -159,8 +169,6 @@ def add_meta_data(model : datamodels.LigerIRISDataModel, meta : dict): if target is not None: setattr(target, attrs[-1], value) - return model - def create_ramp( source : np.ndarray, # e- / s / pixel including all sources @@ -183,4 +191,5 @@ def create_ramp( np.clip(data, 0, np.iinfo(np.int16).max) ramp_model = datamodels.RampModel(instrument=meta['instrument.name'], times=times, data=data, dq=dq) add_meta_data(ramp_model, meta) + ramp_model.meta.data_level = 0 return ramp_model \ No newline at end of file
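Editor's note on the ``test_utils.add_meta_data`` hunk at the end of this diff: the merge order is reversed so that the instrument defaults only fill in missing keys and no longer overwrite metadata supplied by the caller. A small illustration of the difference; the keys are borrowed from the test metadata and the values are arbitrary::

    # Keys from the test metadata; values are made up for illustration.
    defaults = {'exposure.readmode': 'DEFAULT', 'instrument.scale': 0.004}
    user_meta = {'exposure.readmode': 'MCDS', 'instrument.filter': 'Y'}

    # Old pattern: meta.update(defaults) lets the defaults clobber caller values.
    old = dict(user_meta)
    old.update(defaults)
    assert old['exposure.readmode'] == 'DEFAULT'  # caller's value lost

    # New pattern: start from the defaults, then apply the caller's meta.
    new = dict(defaults)
    new.update(user_meta)
    assert new['exposure.readmode'] == 'MCDS'     # caller's value preserved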