From 5c5bf7a6754ab204ad72b144a50070ceb5f81487 Mon Sep 17 00:00:00 2001 From: jemorrison Date: Mon, 19 Aug 2024 12:56:46 -0700 Subject: [PATCH 01/39] first attempt --- jwst/cube_build/cube_build_step.py | 36 ++++++++++++++++++++++++++++++ jwst/cube_build/ifu_cube.py | 34 +++++++++++++++++++++++++--- 2 files changed, 67 insertions(+), 3 deletions(-) diff --git a/jwst/cube_build/cube_build_step.py b/jwst/cube_build/cube_build_step.py index c675ae338f..e30b719721 100755 --- a/jwst/cube_build/cube_build_step.py +++ b/jwst/cube_build/cube_build_step.py @@ -9,6 +9,7 @@ from . import cube_build from . import ifu_cube from . import data_types +import asdf from ..assign_wcs.util import update_s_region_keyword from ..stpipe import Step, record_step_status @@ -64,6 +65,7 @@ class CubeBuildStep (Step): search_output_file = boolean(default=false) output_use_model = boolean(default=true) # Use filenames in the output models suffix = string(default='s3d') + offset_list = string(default=None) debug_spaxel = string(default='-1 -1 -1') # Default not used """ @@ -236,6 +238,17 @@ def process(self, input): self.pars_input['output_type'] = self.output_type self.log.info(f'Setting output type to: {self.output_type}') +# ________________________________________________________________________________ +# If an offset file is provided do some basic checks on the file and its contents + self.offsets = None + + if self.offset_list is not None: + offsets = self.check_offset_list() + + if offsets is not None: + print(offsets) + self.offsets = offsets +# ________________________________________________________________________________ # Read in Cube Parameter Reference file # identify what reference file has been associated with these input @@ -276,6 +289,7 @@ def process(self, input): 'roiw': self.roiw, 'wavemin': self.wavemin, 'wavemax': self.wavemax, + 'offsets':self.offsets, 'skip_dqflagging': self.skip_dqflagging, 'suffix': self.suffix, 'debug_spaxel': self.debug_spaxel} @@ -530,3 +544,25 @@ def read_user_input(self): # remove duplicates if needed self.pars_input['grating'] = list(set(self.pars_input['grating'])) # ________________________________________________________________________________ + + def check_offset_list(self): + # first check file is asdf + + check_asdf = asdf.util.get_file_type(asdf.generic_io.get_file(self.offset_list)) + if check_asdf == asdf.util.FileType.ASDF: + with asdf.open(self.offset_list) as af: + offsets = af.tree['offsets'] + + for model in self.input_models: + print(model.meta.filename) + file_check = model.meta.filename + if file_check in offsets['filename']: + #print('found file', file_check) + continue + else: + print('In file from association not found in offset list') + print(offsets['filename']) + return None + return offsets + + diff --git a/jwst/cube_build/ifu_cube.py b/jwst/cube_build/ifu_cube.py index eb66482dfa..ec606b2e1e 100644 --- a/jwst/cube_build/ifu_cube.py +++ b/jwst/cube_build/ifu_cube.py @@ -69,6 +69,7 @@ def __init__(self, self.cube_pa = pars_cube.get('cube_pa') self.nspax_x = pars_cube.get('nspax_x') self.nspax_y = pars_cube.get('nspax_y') + self.offsets = pars_cube.get('offsets') self.rois = pars_cube.get('rois') self.roiw = pars_cube.get('roiw') self.debug_spaxel = pars_cube.get('debug_spaxel') @@ -1515,9 +1516,12 @@ def map_detector_to_outputframe(self, this_par1, scalerad_det = None x_det = None y_det = None - + offsets = self.offsets + + if self.instrument == 'MIRI': - sky_result = self.map_miri_pixel_to_sky(input_model, this_par1, subtract_background) + 
sky_result = self.map_miri_pixel_to_sky(input_model, this_par1, subtract_background, + offsets) (x, y, ra, dec, wave_all, slice_no_all, dwave_all, corner_coord_all) = sky_result elif self.instrument == 'NIRSPEC': @@ -1692,7 +1696,8 @@ def map_detector_to_outputframe(self, this_par1, softrad_det, scalerad_det, x_det, y_det # ______________________________________________________________________ - def map_miri_pixel_to_sky(self, input_model, this_par1, subtract_background): + def map_miri_pixel_to_sky(self, input_model, this_par1, subtract_background, + offsets): """Loop over a file and map the detector pixels to the output cube The output frame is on the SKY (ra-dec) @@ -1719,6 +1724,17 @@ def map_miri_pixel_to_sky(self, input_model, this_par1, subtract_background): dwave = None corner_coord = None + raoffset = 0.0 + decoffset = 0.0 + # pull out ra dec offset if it exists + if offsets is not None: + filename = input_model.meta.filename + index = offsets['filename'].index(filename) + raoffset = offsets['raoffset'][index]/3600.0 + decoffset = offsets['decoffset'][index]/3600.0 + + print('ra and dec offset to apply in degrees', raoffset, decoffset) + # check if background sky matching as been done in mrs_imatch step # If it has not been subtracted and the background has not been # subtracted - subtract it. @@ -1754,6 +1770,8 @@ def map_miri_pixel_to_sky(self, input_model, this_par1, subtract_background): # if self.coord_system == 'skyalign' or self.coord_system == 'ifualign': ra, dec, wave = input_model.meta.wcs(x, y) + ra = ra + raoffset + dec = dec + decoffset valid1 = ~np.isnan(ra) ra = ra[valid1] dec = dec[valid1] @@ -1793,6 +1811,16 @@ def map_miri_pixel_to_sky(self, input_model, this_par1, subtract_background): input_model.meta.wcs.output_frame, alpha2, beta - dbeta * pixfrac / 2., wave) + ra1 = ra1 + raoffset + ra2 = ra2 + raoffset + ra3 = ra3 + raoffset + ra4 = ra4 + raoffset + + dec1 = dec1 + decoffset + dec2 = dec2 + decoffset + dec3 = dec3 + decoffset + dec4 = dec4 + decoffset + corner_coord = [ra1, dec1, ra2, dec2, ra3, dec3, ra4, dec4] sky_result = (x, y, ra, dec, wave, slice_no, dwave, corner_coord) From cbc2a50b832dc13f7bf184e4bee4f133ea2dba16 Mon Sep 17 00:00:00 2001 From: jemorrison Date: Wed, 21 Aug 2024 13:20:55 -0700 Subject: [PATCH 02/39] First attempt at adding offsets --- jwst/cube_build/cube_build_step.py | 54 +++++++++++++++++++++--------- jwst/cube_build/ifu_cube.py | 45 +++++++++++++++++-------- 2 files changed, 70 insertions(+), 29 deletions(-) diff --git a/jwst/cube_build/cube_build_step.py b/jwst/cube_build/cube_build_step.py index e30b719721..5726c4af6c 100755 --- a/jwst/cube_build/cube_build_step.py +++ b/jwst/cube_build/cube_build_step.py @@ -239,14 +239,15 @@ def process(self, input): self.log.info(f'Setting output type to: {self.output_type}') # ________________________________________________________________________________ -# If an offset file is provided do some basic checks on the file and its contents +# If an offset file is provided do some basic checks on the file and its contents. +# The offset list contains a matching list to the files in the association +# used in calspec3 (or offline cube building). +# Each row in the offset list contain a filename, ra offset and dec offset. +# The offset list is an asdf file. 
self.offsets = None - if self.offset_list is not None: offsets = self.check_offset_list() - if offsets is not None: - print(offsets) self.offsets = offsets # ________________________________________________________________________________ # Read in Cube Parameter Reference file @@ -289,7 +290,7 @@ def process(self, input): 'roiw': self.roiw, 'wavemin': self.wavemin, 'wavemax': self.wavemax, - 'offsets':self.offsets, + 'offsets': self.offsets, 'skip_dqflagging': self.skip_dqflagging, 'suffix': self.suffix, 'debug_spaxel': self.debug_spaxel} @@ -546,23 +547,46 @@ def read_user_input(self): # ________________________________________________________________________________ def check_offset_list(self): - # first check file is asdf - + """Read in an optional ra and dec offsets for each file. + + Summary + ---------- + Check that is file is asdf file. + check the file has the correct format: + For each file in the input assocation check that there is a corresponding + file in the offset file. + Also check that each file in the offset list contain a ra offset and dec offset. + + """ + check_asdf = asdf.util.get_file_type(asdf.generic_io.get_file(self.offset_list)) if check_asdf == asdf.util.FileType.ASDF: with asdf.open(self.offset_list) as af: offsets = af.tree['offsets'] - + + format_failure = False + # Currently the offset list has to have the following keys: filename, raoffset, decoffset + if 'filename' not in offsets.keys(): + self.log.warning('Filename is not listed in the offset list') + format_failure = True + if 'raoffset' not in offsets.keys(): + self.log.warning('raoffset is not listed in the offset list') + format_failure = True + if 'decoffset' not in offsets.keys(): + self.log.warning('decoffset is not listed in the offset list') + format_failure = True + if format_failure: + self.log.warning('Offset list does not have the correct format') + self.log.warning('No offsets are applied') + return None + for model in self.input_models: - print(model.meta.filename) file_check = model.meta.filename if file_check in offsets['filename']: - #print('found file', file_check) + ra = offsets['raoffset'] + dec = offsets['decoffset'] continue else: - print('In file from association not found in offset list') - print(offsets['filename']) + self.log.info('File in assocation is not found in offset list list %s', file_check) return None - return offsets - - + return offsets diff --git a/jwst/cube_build/ifu_cube.py b/jwst/cube_build/ifu_cube.py index ec606b2e1e..144804472c 100644 --- a/jwst/cube_build/ifu_cube.py +++ b/jwst/cube_build/ifu_cube.py @@ -1316,7 +1316,21 @@ def setup_ifucube_wcs(self): input_file = self.master_table.FileMap[self.instrument][this_a][this_b][k] input_model = datamodels.open(input_file) - +# ________________________________________________________________________________ + # If offsets are provided. Pull in ra and dec offsets. 
+ raoffset = 0.0 + decoffset = 0.0 + # pull out ra dec offset if it exists + if self.offsets is not None: + filename = input_model.meta.filename + index = self.offsets['filename'].index(filename) + raoffset = self.offsets['raoffset'][index] + decoffset = self.offsets['decoffset'][index] + log.info("Ra and dec offset (arc seconds) applied to file :%5.2f, %5.2f, %s", + raoffset, decoffset, filename) + raoffset = raoffset/3600.0 # convert to degress + decoffset = decoffset/3600.0 +# ________________________________________________________________________________ # Find the footprint of the image spectral_found = hasattr(input_model.meta.wcsinfo, 'spectral_region') spatial_found = hasattr(input_model.meta.wcsinfo, 's_region') @@ -1372,15 +1386,15 @@ def setup_ifucube_wcs(self): ca1, cb1, ca2, cb2, ca3, cb3, ca4, cb4, lmin, lmax = ch_corners # now append this model spatial and spectral corner - corner_a.append(ca1) - corner_a.append(ca2) - corner_a.append(ca3) - corner_a.append(ca4) + corner_a.append(ca1 + raoffset) + corner_a.append(ca2 + raoffset) + corner_a.append(ca3 + raoffset) + corner_a.append(ca4 + raoffset) - corner_b.append(cb1) - corner_b.append(cb2) - corner_b.append(cb3) - corner_b.append(cb4) + corner_b.append(cb1 + decoffset) + corner_b.append(cb2 + decoffset) + corner_b.append(cb3 + decoffset) + corner_b.append(cb4 + decoffset) lambda_min.append(lmin) lambda_max.append(lmax) @@ -1518,7 +1532,6 @@ def map_detector_to_outputframe(self, this_par1, y_det = None offsets = self.offsets - if self.instrument == 'MIRI': sky_result = self.map_miri_pixel_to_sky(input_model, this_par1, subtract_background, offsets) @@ -1713,6 +1726,8 @@ def map_miri_pixel_to_sky(self, input_model, this_par1, subtract_background, needed for MIRI data input: datamodel input data model + offsets: dictionary + optional dictionary of ra and dec offsets to apply Returns ------- @@ -1730,10 +1745,12 @@ def map_miri_pixel_to_sky(self, input_model, this_par1, subtract_background, if offsets is not None: filename = input_model.meta.filename index = offsets['filename'].index(filename) - raoffset = offsets['raoffset'][index]/3600.0 - decoffset = offsets['decoffset'][index]/3600.0 - - print('ra and dec offset to apply in degrees', raoffset, decoffset) + raoffset = offsets['raoffset'][index] + decoffset = offsets['decoffset'][index] + log.info("Ra and dec offset (arc seconds) applied to file :%5.2f, %5.2f, %s", + raoffset, decoffset, filename) + raoffset = raoffset/3600.0 # convert to degress + decoffset = decoffset/3600.0 # check if background sky matching as been done in mrs_imatch step # If it has not been subtracted and the background has not been From 7d17e04fc82ba5b893fc845d9a4ba1f9d9f5253d Mon Sep 17 00:00:00 2001 From: jemorrison Date: Tue, 27 Aug 2024 09:15:30 -0700 Subject: [PATCH 03/39] updates --- docs/jwst/cube_build/arguments.rst | 32 +++++++++++++++++++++++++++ jwst/cube_build/cube_build_step.py | 1 + jwst/cube_build/ifu_cube.py | 35 +++++++++++++++++++++--------- 3 files changed, 58 insertions(+), 10 deletions(-) diff --git a/docs/jwst/cube_build/arguments.rst b/docs/jwst/cube_build/arguments.rst index a30a181b6c..af13f1e596 100644 --- a/docs/jwst/cube_build/arguments.rst +++ b/docs/jwst/cube_build/arguments.rst @@ -91,6 +91,13 @@ The following arguments control the size and sampling characteristics of the out ``nspax_y`` The odd integer number of spaxels to use in the y dimension of the tangent plane. 
+``offset_list = [string]``
+  The string contains the name of the file holding the ra and dec offsets to apply to each input file. This file
+  must be an asdf file and it has a specific format. It is assumed the user has determined the ra and dec
+  offsets to apply to the data. For details on how to construct the format of the offset file see creating
+  :ref:`offsets` files.
+
+
 ``coord_system [string]``
   The default IFU cubes are built on the ra-dec coordinate system (``coord_system=skyalign``). In these cubes north
   is up and east is left. There are two other coordinate systems an IFU cube can be built on:
@@ -122,3 +129,28 @@ A parameter only used for investigating which detector pixels contributed to a c
   The string is the x,y,z value of the cube spaxel that is being investigated. The numbering starts counting at 0.
   To print information to the screen about the x = 10, y = 20, z = 35 spaxel the parameter string value is '10 20 35'.
+
+.. _offsets:
+The offset file is an ASDF formatted file (ASDF stands for "Advanced Scientific Data Format"). For each input file in the spec3 association used to build the IFU cubes, an ra and dec offset is provided. The offsets are in arcseconds. Below is an example of how to make an offset file. It is assumed the user has determined the
+offsets to apply for each file. The offsets are stored in a python dictionary, `offsets`. The items of this dictionary are `filename`, `raoffset` and `decoffset`. The cube building code expects this dictionary to hold the information
+for storing the file names and the associated ra and dec offsets.
+
+It is assumed there exists a list of files, with ra and dec offsets, that are fed to this routine. The ra and dec offsets are
+provided in arcseconds. The cube building code will apply the ra offsets after multiplying by the cos(crval2),
+where crval2 is the declination center of the IFU cube. `num` is the number of files.
+
+import asdf
+offsets = {}
+offsets['filename'] = []
+offsets['raoffset'] = []
+offsets['decoffset'] = []
+for i in range(num):
+
+    offsets['filename'].append(file[i])
+    offsets['raoffset'].append(ra_center1[i])
+    offsets['decoffset'].append(dec_center1[i])
+
+af = asdf.AsdfFile({'offsets':offsets})
+af.write_to('offsets.asdf')
+
+The offset asdf filename can be any name, but it must have the `asdf` extension.
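For reference, a minimal sketch of how these offsets are consumed once the file above exists, mirroring the lookup added in the earlier patches (``check_offset_list`` and ``map_miri_pixel_to_sky``); the exposure name and the coordinate values here are hypothetical:

.. code-block:: python

    import asdf
    import numpy as np

    # Read back the dictionary written by the docs example above.
    with asdf.open('offsets.asdf') as af:
        offsets = af.tree['offsets']

    # Look up the offsets for one exposure of the association, as
    # check_offset_list / map_miri_pixel_to_sky do.
    filename = 'test1.fits'                           # hypothetical file name
    index = offsets['filename'].index(filename)
    raoffset = offsets['raoffset'][index] / 3600.0    # arcsec -> degrees
    decoffset = offsets['decoffset'][index] / 3600.0

    # Shift the per-pixel sky coordinates returned by the WCS.
    ra = np.array([44.999, 45.001])     # placeholder ra values in degrees
    dec = np.array([-0.002, -0.001])    # placeholder dec values in degrees
    ra = ra + raoffset
    dec = dec + decoffset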
diff --git a/jwst/cube_build/cube_build_step.py b/jwst/cube_build/cube_build_step.py index 5726c4af6c..702b496090 100755 --- a/jwst/cube_build/cube_build_step.py +++ b/jwst/cube_build/cube_build_step.py @@ -582,6 +582,7 @@ def check_offset_list(self): for model in self.input_models: file_check = model.meta.filename + print(file_check) if file_check in offsets['filename']: ra = offsets['raoffset'] dec = offsets['decoffset'] diff --git a/jwst/cube_build/ifu_cube.py b/jwst/cube_build/ifu_cube.py index 144804472c..a88129657e 100644 --- a/jwst/cube_build/ifu_cube.py +++ b/jwst/cube_build/ifu_cube.py @@ -1861,6 +1861,21 @@ def map_nirspec_pixel_to_sky(self, input_model): x, y, ra, dec, lambda, slice_no """ + + # check if we have an ra and dec offset file + raoffset = 0.0 + decoffset = 0.0 + # pull out ra dec offset if it exists + if offsets is not None: + filename = input_model.meta.filename + index = offsets['filename'].index(filename) + raoffset = offsets['raoffset'][index] + decoffset = offsets['decoffset'][index] + log.info("Ra and dec offset (arc seconds) applied to file :%5.2f, %5.2f, %s", + raoffset, decoffset, filename) + raoffset = raoffset/3600.0 # convert to degress + decoffset = decoffset/3600.0 + # initialize the ra,dec, and wavelength arrays # we will loop over slice_nos and fill in values # the flag_det will be set when a slice_no pixel is filled in @@ -2009,19 +2024,19 @@ def map_nirspec_pixel_to_sky(self, input_model): valid_data = np.where(flag_det == 1) y, x = valid_data - ra = ra_det[valid_data] - dec = dec_det[valid_data] + ra = ra_det[valid_data] + raoffset + dec = dec_det[valid_data] + decoffset wave = lam_det[valid_data] slice_no = slice_det[valid_data] dwave = dwave_det[valid_data] - ra1 = ra1_det[valid_data] - ra2 = ra2_det[valid_data] - ra3 = ra3_det[valid_data] - ra4 = ra4_det[valid_data] - dec1 = dec1_det[valid_data] - dec2 = dec2_det[valid_data] - dec3 = dec3_det[valid_data] - dec4 = dec4_det[valid_data] + ra1 = ra1_det[valid_data] + raoffset + ra2 = ra2_det[valid_data] + raoffset + ra3 = ra3_det[valid_data] + raoffset + ra4 = ra4_det[valid_data] + raoffset + dec1 = dec1_det[valid_data] + decoffset + dec2 = dec2_det[valid_data] + decoffset + dec3 = dec3_det[valid_data] + decoffset + dec4 = dec4_det[valid_data] + decoffset corner_coord = [ra1, dec1, ra2, dec2, ra3, dec3, ra4, dec4] sky_result = (x, y, ra, dec, wave, slice_no, dwave, corner_coord) From aa5d546f820478fb1f84c550dc6e64f47ab19be5 Mon Sep 17 00:00:00 2001 From: jemorrison Date: Tue, 27 Aug 2024 09:25:56 -0700 Subject: [PATCH 04/39] updates --- docs/jwst/cube_build/arguments.rst | 9 ++++++--- jwst/cube_build/cube_build_step.py | 1 - 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/docs/jwst/cube_build/arguments.rst b/docs/jwst/cube_build/arguments.rst index af13f1e596..6d718358b1 100644 --- a/docs/jwst/cube_build/arguments.rst +++ b/docs/jwst/cube_build/arguments.rst @@ -131,14 +131,17 @@ A parameter only used for investigating which detector pixels contributed to a c To print information to the screeen about the x = 10, y = 20, z = 35 spaxel the parameter string value is '10 20 35'. .. _offsets: -The offset file is an ASDF formated file : `_ stands for "Advanced Scientific Data. For each input file in the spec3 assocation used to build the IFU cubes an ra and dec offset is provided. The offsets are in arc seconds. Below is an example of how to make an offset file. It is assumed the user has determined the +The offset file is an ASDF formated file : `_ stands for "Advanced Scientific Data. 
For each +input file in the spec3 assocation used to build the IFU cubes, an ra and dec offset, in arc seconds, is provided. +Below is an example of how to make an ASDF offset file. It is assumed the user has determined the offsets to apply for each file. The offsets are stored in a python dictionary, `offsets`. The items of this dictionary are `filenames`, `raoffset` and `decoffset`. The cube_building code is expects this dictionary to hold the information for storing the file names and the associated ra and dec offsets. It is assumed there exists a list of files, ra and dec offsets that are feed to this routine. The ra and dec offsets -provided in arcseconds. The cube_building code will apply the ra offsets after multiplying by the +provided in arcseconds. The cube_building code will apply the ra offsets after multiplying by the cos(crval2), where crval2 is the +declination center of the IFU cube. `num` is the number of files. - +y import asdf offsets = {} offsets['filename'] = [] diff --git a/jwst/cube_build/cube_build_step.py b/jwst/cube_build/cube_build_step.py index 702b496090..5726c4af6c 100755 --- a/jwst/cube_build/cube_build_step.py +++ b/jwst/cube_build/cube_build_step.py @@ -582,7 +582,6 @@ def check_offset_list(self): for model in self.input_models: file_check = model.meta.filename - print(file_check) if file_check in offsets['filename']: ra = offsets['raoffset'] dec = offsets['decoffset'] From c121c1fc8ac54ed79c94fc61cff6ed8abbae726c Mon Sep 17 00:00:00 2001 From: jemorrison Date: Wed, 28 Aug 2024 17:15:05 -0700 Subject: [PATCH 05/39] update for cos(dec) --- docs/jwst/cube_build/arguments.rst | 19 ++++++++++--------- jwst/cube_build/ifu_cube.py | 26 +++++++++++++++++++++++++- 2 files changed, 35 insertions(+), 10 deletions(-) diff --git a/docs/jwst/cube_build/arguments.rst b/docs/jwst/cube_build/arguments.rst index 6d718358b1..ab329aa6ee 100644 --- a/docs/jwst/cube_build/arguments.rst +++ b/docs/jwst/cube_build/arguments.rst @@ -131,24 +131,25 @@ A parameter only used for investigating which detector pixels contributed to a c To print information to the screeen about the x = 10, y = 20, z = 35 spaxel the parameter string value is '10 20 35'. .. _offsets: -The offset file is an ASDF formated file : `_ stands for "Advanced Scientific Data. For each -input file in the spec3 assocation used to build the IFU cubes, an ra and dec offset, in arc seconds, is provided. +The offset file is an ASDF formated file :``_ stands for "Advanced Scientific Data. For each +input file in the spec3 assocation used to build the IFU cubes, there is a corresponding right ascension and declination offset, +given arc seconds. Below is an example of how to make an ASDF offset file. It is assumed the user has determined the -offsets to apply for each file. The offsets are stored in a python dictionary, `offsets`. The items of this dictionary are `filenames`, `raoffset` and `decoffset`. The cube_building code is expects this dictionary to hold the information -for storing the file names and the associated ra and dec offsets. +offsets to apply the data in each file. The offsets are stored in a python dictionary, `offsets`. The items of this dictionary +are `filenames`, `raoffset` and `decoffset`. The IFU cube building code expects this dictionary to hold the information +for storing the file names and the associated ra and dec offsets. The file names should not contain the directory path. -It is assumed there exists a list of files, ra and dec offsets that are feed to this routine. 
The ra and dec offsets -provided in arcseconds. The cube_building code will apply the ra offsets after multiplying by the cos(crval2), where crval2 is the +It is assumed there exists a list of files, ra and dec offsets that are feed to this method. The ra and dec offsets +provided in arcseconds. The cube building code will apply the ra offsets after dividing by cos(crval2), where crval2 is the declination center of the IFU cube. -`num` is the number of files. -y +Below `num` is the number of files. + import asdf offsets = {} offsets['filename'] = [] offsets['raoffset'] = [] offsets['decoffset'] = [] for i in range(num): - offsets['filename'].append(file[i]) offsets['raoffset'].append(ra_center1[i]) offsets['decoffset'].append(dec_center1[i]) diff --git a/jwst/cube_build/ifu_cube.py b/jwst/cube_build/ifu_cube.py index a88129657e..6755e9e2e4 100644 --- a/jwst/cube_build/ifu_cube.py +++ b/jwst/cube_build/ifu_cube.py @@ -114,7 +114,8 @@ def __init__(self, self.naxis3 = None self.cdelt3_normal = None self.rot_angle = None # rotation angle between Ra-Dec and IFU local instrument plane - + self.median_dec = None + self.a_min = 0 self.a_max = 0 self.b_min = 0 @@ -1234,6 +1235,7 @@ def setup_ifucube_wcs(self): # _____________________________________________________________________________ self.cdelt1 = self.spatial_size self.cdelt2 = self.spatial_size + deg2rad = math.pi / 180.0 if self.linear_wavelength: self.cdelt3 = self.spectral_size @@ -1310,6 +1312,25 @@ def setup_ifucube_wcs(self): log.debug(f'Working on data from {this_a}, {this_b}') n = len(self.master_table.FileMap[self.instrument][this_a][this_b]) log.debug('number of files %d', n) + + # find the median center declination if we have an offset file + if self.offsets is not None: + decs = [] + for k in range(n): + input_file = self.master_table.FileMap[self.instrument][this_a][this_b][k] + input_model = datamodels.open(input_file) + spatial_box = input_model.meta.wcsinfo.s_region + s = spatial_box.split(' ') + cb1 = float(s[4]) + cb2 = float(s[6]) + cb3 = float(s[8]) + cb4 = float(s[10]) + m = (cb1 + cb2 + cb3 + cb4)/4 + decs.append(m) + + self.median_dec = np.nanmedian(decs) + print('Median declination ', self.median_dec) + for k in range(n): lmin = 0.0 lmax = 0.0 @@ -1329,6 +1350,7 @@ def setup_ifucube_wcs(self): log.info("Ra and dec offset (arc seconds) applied to file :%5.2f, %5.2f, %s", raoffset, decoffset, filename) raoffset = raoffset/3600.0 # convert to degress + raoffset = raoffset /np.cos(self.median_dec *deg2rad) decoffset = decoffset/3600.0 # ________________________________________________________________________________ # Find the footprint of the image @@ -1738,6 +1760,7 @@ def map_miri_pixel_to_sky(self, input_model, this_par1, subtract_background, slice_no = None # Slice number dwave = None corner_coord = None + deg2rad = math.pi / 180.0 raoffset = 0.0 decoffset = 0.0 @@ -1750,6 +1773,7 @@ def map_miri_pixel_to_sky(self, input_model, this_par1, subtract_background, log.info("Ra and dec offset (arc seconds) applied to file :%5.2f, %5.2f, %s", raoffset, decoffset, filename) raoffset = raoffset/3600.0 # convert to degress + raoffset = raoffset /np.cos(self.median_dec *deg2rad) decoffset = decoffset/3600.0 # check if background sky matching as been done in mrs_imatch step From 44f416a68eab891ea989b6f542f2a3b633a6671c Mon Sep 17 00:00:00 2001 From: jemorrison Date: Thu, 29 Aug 2024 09:47:44 -0700 Subject: [PATCH 06/39] update --- docs/jwst/cube_build/arguments.rst | 30 ++++++++++++++++++------------ 1 file changed, 18 
insertions(+), 12 deletions(-) diff --git a/docs/jwst/cube_build/arguments.rst b/docs/jwst/cube_build/arguments.rst index ab329aa6ee..71810e5b76 100644 --- a/docs/jwst/cube_build/arguments.rst +++ b/docs/jwst/cube_build/arguments.rst @@ -131,6 +131,10 @@ A parameter only used for investigating which detector pixels contributed to a c To print information to the screeen about the x = 10, y = 20, z = 35 spaxel the parameter string value is '10 20 35'. .. _offsets: + +Creating an offset file +----------------------- + The offset file is an ASDF formated file :``_ stands for "Advanced Scientific Data. For each input file in the spec3 assocation used to build the IFU cubes, there is a corresponding right ascension and declination offset, given arc seconds. @@ -144,17 +148,19 @@ provided in arcseconds. The cube building code will apply the ra offsets after d declination center of the IFU cube. Below `num` is the number of files. -import asdf -offsets = {} -offsets['filename'] = [] -offsets['raoffset'] = [] -offsets['decoffset'] = [] -for i in range(num): - offsets['filename'].append(file[i]) - offsets['raoffset'].append(ra_center1[i]) - offsets['decoffset'].append(dec_center1[i]) - -af = asdf.AsdfFile({'offsets':offsets}) -af.write_to('offsets.asdf') + +.. code-block:: python + + import asdf + offsets = {} + offsets['filename'] = [] + offsets['raoffset'] = [] + offsets['decoffset'] = [] + for i in range(num): + offsets['filename'].append(file[i]) + offsets['raoffset'].append(ra_center1[i]) + offsets['decoffset'].append(dec_center1[i]) + af = asdf.AsdfFile({'offsets':offsets}) + af.write_to('offsets.asdf') The offset asdf filename can be any name, but it must have the `asdf` extension. From e1fcac64db1352c0cb9d72c0d00f8886ce0ab80b Mon Sep 17 00:00:00 2001 From: jemorrison Date: Thu, 29 Aug 2024 10:03:26 -0700 Subject: [PATCH 07/39] fix docs --- docs/jwst/cube_build/arguments.rst | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/docs/jwst/cube_build/arguments.rst b/docs/jwst/cube_build/arguments.rst index 71810e5b76..3f318941db 100644 --- a/docs/jwst/cube_build/arguments.rst +++ b/docs/jwst/cube_build/arguments.rst @@ -91,11 +91,11 @@ The following arguments control the size and sampling characteristics of the out ``nspax_y`` The odd integer number of spaxels to use in the y dimension of the tangent plane. -``offset_list = [string]`` +``offset_list [string]`` The string contains the name of the file holding ra and dec offsets to apply to each input file. This file must be an asdf file and the it has a specific format. It is assumed the user has determined the ra and dec - offsets to apply to the data. For details on how to construct the format of the offset file see creating - :ref:`offsets` files. + offsets to apply to the data. For details on how to construct the format of the offset file see + :ref:`offsets`. ``coord_system [string]`` @@ -136,15 +136,14 @@ Creating an offset file ----------------------- The offset file is an ASDF formated file :``_ stands for "Advanced Scientific Data. For each -input file in the spec3 assocation used to build the IFU cubes, there is a corresponding right ascension and declination offset, -given arc seconds. +input file in the spec3 assocation used to build the IFU cubes, the offset files needs to have a corresponding right ascension and declination offset given arc seconds. Below is an example of how to make an ASDF offset file. It is assumed the user has determined the -offsets to apply the data in each file. 
The offsets are stored in a python dictionary, `offsets`. The items of this dictionary +offsets to apply to the data in each file. The offsets are stored in a python dictionary, `offsets`. The items of this dictionary are `filenames`, `raoffset` and `decoffset`. The IFU cube building code expects this dictionary to hold the information for storing the file names and the associated ra and dec offsets. The file names should not contain the directory path. -It is assumed there exists a list of files, ra and dec offsets that are feed to this method. The ra and dec offsets -provided in arcseconds. The cube building code will apply the ra offsets after dividing by cos(crval2), where crval2 is the +It is assumed there exists a list of files, ra and dec offsets that are feed to this method. The ra and dec offsets need to be +in arcseconds. The cube building code will apply the ra offsets after dividing by cos(crval2), where crval2 is the declination center of the IFU cube. Below `num` is the number of files. From 984c0c7a2c39c33847890e90d71e995bba6b3d2d Mon Sep 17 00:00:00 2001 From: jemorrison Date: Thu, 29 Aug 2024 10:05:12 -0700 Subject: [PATCH 08/39] update docs --- docs/jwst/cube_build/arguments.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/jwst/cube_build/arguments.rst b/docs/jwst/cube_build/arguments.rst index 3f318941db..c7c06b0746 100644 --- a/docs/jwst/cube_build/arguments.rst +++ b/docs/jwst/cube_build/arguments.rst @@ -144,7 +144,7 @@ for storing the file names and the associated ra and dec offsets. The file names It is assumed there exists a list of files, ra and dec offsets that are feed to this method. The ra and dec offsets need to be in arcseconds. The cube building code will apply the ra offsets after dividing by cos(crval2), where crval2 is the -declination center of the IFU cube. +declination center of the IFU cube. The offset asdf filename can be any name, but it must have the `asdf` extension. Below `num` is the number of files. @@ -162,4 +162,4 @@ Below `num` is the number of files. af = asdf.AsdfFile({'offsets':offsets}) af.write_to('offsets.asdf') -The offset asdf filename can be any name, but it must have the `asdf` extension. 
+ From afec305c902697c474f4004c981abd03fdd670b4 Mon Sep 17 00:00:00 2001 From: jemorrison Date: Thu, 5 Sep 2024 06:48:53 -0700 Subject: [PATCH 09/39] added schema to cube_build --- jwst/cube_build/cube_build_step.py | 17 ++++++++------- jwst/cube_build/ifu_cube.py | 25 +++++++++++----------- jwst/cube_build/ifuoffset.schema.yaml | 30 +++++++++++++++++++++++++++ 3 files changed, 52 insertions(+), 20 deletions(-) create mode 100644 jwst/cube_build/ifuoffset.schema.yaml diff --git a/jwst/cube_build/cube_build_step.py b/jwst/cube_build/cube_build_step.py index 5726c4af6c..3af0d8d6ea 100755 --- a/jwst/cube_build/cube_build_step.py +++ b/jwst/cube_build/cube_build_step.py @@ -41,7 +41,7 @@ class CubeBuildStep (Step): spec = """ channel = option('1','2','3','4','all',default='all') # Channel - band = option('short','medium','long','short-medium','short-long','medium-short', \ + band = option('short','medium','long','short-medium','short-long','medium-short', 'medium-long', 'long-short', 'long-medium','all',default='all') # Band grating = option('prism','g140m','g140h','g235m','g235h','g395m','g395h','all',default='all') # Grating filter = option('clear','f100lp','f070lp','f170lp','f290lp','all',default='all') # Filter @@ -65,7 +65,7 @@ class CubeBuildStep (Step): search_output_file = boolean(default=false) output_use_model = boolean(default=true) # Use filenames in the output models suffix = string(default='s3d') - offset_list = string(default=None) + offset_file = string(default=None) debug_spaxel = string(default='-1 -1 -1') # Default not used """ @@ -245,8 +245,8 @@ def process(self, input): # Each row in the offset list contain a filename, ra offset and dec offset. # The offset list is an asdf file. self.offsets = None - if self.offset_list is not None: - offsets = self.check_offset_list() + if self.offset_file is not None: + offsets = self.check_offset_file() if offsets is not None: self.offsets = offsets # ________________________________________________________________________________ @@ -546,7 +546,8 @@ def read_user_input(self): self.pars_input['grating'] = list(set(self.pars_input['grating'])) # ________________________________________________________________________________ - def check_offset_list(self): + + def check_offset_file(self): """Read in an optional ra and dec offsets for each file. 
Summary @@ -559,9 +560,11 @@ def check_offset_list(self): """ - check_asdf = asdf.util.get_file_type(asdf.generic_io.get_file(self.offset_list)) + af = asdf.open(self.offset_file, custom_schema = 'ifuoffset_schema.yaml') + + check_asdf = asdf.util.get_file_type(asdf.generic_io.get_file(self.offset_file)) if check_asdf == asdf.util.FileType.ASDF: - with asdf.open(self.offset_list) as af: + with asdf.open(self.offset_file) as af: offsets = af.tree['offsets'] format_failure = False diff --git a/jwst/cube_build/ifu_cube.py b/jwst/cube_build/ifu_cube.py index 6755e9e2e4..ad27bad270 100644 --- a/jwst/cube_build/ifu_cube.py +++ b/jwst/cube_build/ifu_cube.py @@ -1329,7 +1329,15 @@ def setup_ifucube_wcs(self): decs.append(m) self.median_dec = np.nanmedian(decs) - print('Median declination ', self.median_dec) + # fold in the median_dec information into ra offset + noffsets = len(self.offsets['raoffset']) + # convert ra and dec offsets to degrees and adjust ra offset for cos(dec) + for im in range(noffsets): + self.offsets['raoffset'][im] = (self.offsets['raoffset'][im]/3600.0)/np.cos(self.median_dec*deg2rad) + self.offsets['decoffset'][im] = self.offsets['decoffset'][im]/3600.0 + + print(self.offsets['raoffset']) + print(self.offsets['decoffset']) for k in range(n): lmin = 0.0 @@ -1347,11 +1355,6 @@ def setup_ifucube_wcs(self): index = self.offsets['filename'].index(filename) raoffset = self.offsets['raoffset'][index] decoffset = self.offsets['decoffset'][index] - log.info("Ra and dec offset (arc seconds) applied to file :%5.2f, %5.2f, %s", - raoffset, decoffset, filename) - raoffset = raoffset/3600.0 # convert to degress - raoffset = raoffset /np.cos(self.median_dec *deg2rad) - decoffset = decoffset/3600.0 # ________________________________________________________________________________ # Find the footprint of the image spectral_found = hasattr(input_model.meta.wcsinfo, 'spectral_region') @@ -1770,11 +1773,9 @@ def map_miri_pixel_to_sky(self, input_model, this_par1, subtract_background, index = offsets['filename'].index(filename) raoffset = offsets['raoffset'][index] decoffset = offsets['decoffset'][index] - log.info("Ra and dec offset (arc seconds) applied to file :%5.2f, %5.2f, %s", - raoffset, decoffset, filename) - raoffset = raoffset/3600.0 # convert to degress - raoffset = raoffset /np.cos(self.median_dec *deg2rad) - decoffset = decoffset/3600.0 + log.info("Ra and dec offset (arc seconds) applied to file :%8.6f, %8.6f, %s", + raoffset*3600.0*np.cos(self.median_dec*deg2rad), + decoffset*3600.0, filename) # check if background sky matching as been done in mrs_imatch step # If it has not been subtracted and the background has not been @@ -1897,8 +1898,6 @@ def map_nirspec_pixel_to_sky(self, input_model): decoffset = offsets['decoffset'][index] log.info("Ra and dec offset (arc seconds) applied to file :%5.2f, %5.2f, %s", raoffset, decoffset, filename) - raoffset = raoffset/3600.0 # convert to degress - decoffset = decoffset/3600.0 # initialize the ra,dec, and wavelength arrays # we will loop over slice_nos and fill in values diff --git a/jwst/cube_build/ifuoffset.schema.yaml b/jwst/cube_build/ifuoffset.schema.yaml new file mode 100644 index 0000000000..c003afcb64 --- /dev/null +++ b/jwst/cube_build/ifuoffset.schema.yaml @@ -0,0 +1,30 @@ +%YAML 1.1 +--- +$schema: "http://stsci.edu/schemas/asdf/asdf-schema-1.0.0" +id: "http://stsci.edu/schemas/yaml-schema/ifuoffset.schema" + +title: IFUoffset reference file model +type: object +properties: + meta: + type: object + properties: + units: + 
description: Units of the ra and dec offset values. + anyOf: + - type: string + - $ref: http://stsci.edu/schemas/asdf/unit/unit-1.0.0 + offsets: + description: dictionary defining offsets values + type: object + items: + type: object + properties: + filename: + type: string + raoffset: + type: number + decoffset: + type: number + +required: [meta, offsets] From 8e464b24f211599200914c63160dab0fa4fb1010 Mon Sep 17 00:00:00 2001 From: jemorrison Date: Thu, 5 Sep 2024 07:13:21 -0700 Subject: [PATCH 10/39] trying to include offset yaml file --- jwst/cube_build/cube_build_step.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/jwst/cube_build/cube_build_step.py b/jwst/cube_build/cube_build_step.py index 3af0d8d6ea..79d69c765e 100755 --- a/jwst/cube_build/cube_build_step.py +++ b/jwst/cube_build/cube_build_step.py @@ -7,6 +7,7 @@ from jwst.lib.pipe_utils import match_nans_and_flags from . import cube_build +from . import ifuoffset.schema.yaml from . import ifu_cube from . import data_types import asdf @@ -41,7 +42,7 @@ class CubeBuildStep (Step): spec = """ channel = option('1','2','3','4','all',default='all') # Channel - band = option('short','medium','long','short-medium','short-long','medium-short', + band = option('short','medium','long','short-medium','short-long','medium-short', \ 'medium-long', 'long-short', 'long-medium','all',default='all') # Band grating = option('prism','g140m','g140h','g235m','g235h','g395m','g395h','all',default='all') # Grating filter = option('clear','f100lp','f070lp','f170lp','f290lp','all',default='all') # Filter From bb351f8fbb4c8110343ebb7288a35db9b42b0247 Mon Sep 17 00:00:00 2001 From: jemorrison Date: Sun, 8 Sep 2024 13:46:47 -0700 Subject: [PATCH 11/39] corrected using a local schema file --- docs/jwst/cube_build/arguments.rst | 24 ++++++++----- jwst/cube_build/cube_build_step.py | 49 ++++++++++++--------------- jwst/cube_build/ifu_cube.py | 3 -- jwst/cube_build/ifuoffset.schema.yaml | 43 ++++++++++++----------- 4 files changed, 57 insertions(+), 62 deletions(-) diff --git a/docs/jwst/cube_build/arguments.rst b/docs/jwst/cube_build/arguments.rst index c7c06b0746..4cfcc95ed1 100644 --- a/docs/jwst/cube_build/arguments.rst +++ b/docs/jwst/cube_build/arguments.rst @@ -151,15 +151,21 @@ Below `num` is the number of files. .. code-block:: python import asdf - offsets = {} - offsets['filename'] = [] - offsets['raoffset'] = [] - offsets['decoffset'] = [] + filename = [] + raoffset = [] + decoffset = [] for i in range(num): - offsets['filename'].append(file[i]) - offsets['raoffset'].append(ra_center1[i]) - offsets['decoffset'].append(dec_center1[i]) - af = asdf.AsdfFile({'offsets':offsets}) - af.write_to('offsets.asdf') + filename.append(file[i]) + raoffset.append(ra_center1[i]) + decoffset.append(dec_center1[i]) + + tree = { + "units": str(u.arcsec), + "filename": filename, + "raoffset": raoffset, + "decoffset": decoffset + } + af = asdf.AsdfFile(tree) + af.write_to(input_dir + 'offsets.asdf') diff --git a/jwst/cube_build/cube_build_step.py b/jwst/cube_build/cube_build_step.py index 79d69c765e..a9e121efc5 100755 --- a/jwst/cube_build/cube_build_step.py +++ b/jwst/cube_build/cube_build_step.py @@ -2,17 +2,16 @@ """ import time - from jwst.datamodels import ModelContainer from jwst.lib.pipe_utils import match_nans_and_flags from . import cube_build -from . import ifuoffset.schema.yaml from . import ifu_cube from . 
import data_types
 import asdf
 from ..assign_wcs.util import update_s_region_keyword
 from ..stpipe import Step, record_step_status
+from pathlib import Path

 __all__ = ["CubeBuildStep"]

@@ -547,7 +546,6 @@ def read_user_input(self):
         self.pars_input['grating'] = list(set(self.pars_input['grating']))
 # ________________________________________________________________________________
-
     def check_offset_file(self):
         """Read in optional ra and dec offsets for each file.
@@ -561,36 +559,31 @@ def check_offset_file(self):
         """
-        af = asdf.open(self.offset_file, custom_schema = 'ifuoffset_schema.yaml')
-
-        check_asdf = asdf.util.get_file_type(asdf.generic_io.get_file(self.offset_file))
-        if check_asdf == asdf.util.FileType.ASDF:
-            with asdf.open(self.offset_file) as af:
-                offsets = af.tree['offsets']
-
-        format_failure = False
-        # Currently the offset list has to have the following keys: filename, raoffset, decoffset
-        if 'filename' not in offsets.keys():
-            self.log.warning('Filename is not listed in the offset list')
-            format_failure = True
-        if 'raoffset' not in offsets.keys():
-            self.log.warning('raoffset is not listed in the offset list')
-            format_failure = True
-        if 'decoffset' not in offsets.keys():
-            self.log.warning('decoffset is not listed in the offset list')
-            format_failure = True
-        if format_failure:
-            self.log.warning('Offset list does not have the correct format')
-            self.log.warning('No offsets are applied')
+        # validate the offset file using the schema file
+        DATA_PATH = Path(__file__).parent
+        af = asdf.open(self.offset_file, custom_schema=DATA_PATH/'ifuoffset.schema.yaml')
+
+        offset_filename = af['filename']
+        offset_ra = af['raoffset']
+        offset_dec = af['decoffset']
+        offset_unit = af['units']
+
+        if offset_unit != 'arcsec':
+            self.log.error('Provide the offset units in arcsec')
+            self.log.error('Turning off adjusting by offsets')
             return None

         for model in self.input_models:
             file_check = model.meta.filename
-            if file_check in offsets['filename']:
+            if file_check in offset_filename:
                 continue
             else:
-                self.log.info('File in assocation is not found in offset list list %s', file_check)
+                self.log.error('File in association is not found in the offset list %s', file_check)
+                self.log.error('Turning off adjusting by offsets')
                 return None
+        offsets = {}
+        offsets['filename'] = offset_filename
+        offsets['raoffset'] = offset_ra
+        offsets['decoffset'] = offset_dec
+
         return offsets
 diff --git a/jwst/cube_build/ifu_cube.py b/jwst/cube_build/ifu_cube.py
index ad27bad270..1033c7a26c 100644
--- a/jwst/cube_build/ifu_cube.py
+++ b/jwst/cube_build/ifu_cube.py
@@ -1335,9 +1335,6 @@ def setup_ifucube_wcs(self):
             for im in range(noffsets):
                 self.offsets['raoffset'][im] = (self.offsets['raoffset'][im]/3600.0)/np.cos(self.median_dec*deg2rad)
                 self.offsets['decoffset'][im] = self.offsets['decoffset'][im]/3600.0
-
-            print(self.offsets['raoffset'])
-            print(self.offsets['decoffset'])

             for k in range(n):
                 lmin = 0.0
 diff --git a/jwst/cube_build/ifuoffset.schema.yaml b/jwst/cube_build/ifuoffset.schema.yaml
index c003afcb64..c2c7d40c6a 100644
--- a/jwst/cube_build/ifuoffset.schema.yaml
+++ b/jwst/cube_build/ifuoffset.schema.yaml
@@ -2,29 +2,28 @@
 ---
 $schema: "http://stsci.edu/schemas/asdf/asdf-schema-1.0.0"
 id: "http://stsci.edu/schemas/yaml-schema/ifuoffset.schema"
-
 title: IFUoffset reference file model
 type: object
 properties:
-  meta:
-    type: object
-    properties:
-      units:
-
- anyOf: - - type: string - - $ref: http://stsci.edu/schemas/asdf/unit/unit-1.0.0 - offsets: - description: dictionary defining offsets values - type: object - items: - type: object - properties: - filename: - type: string - raoffset: - type: number - decoffset: - type: number + units: + description: Units of the ra and dec offset values. + anyOf: + - type: string + - $ref: http://stsci.edu/schemas/asdf/unit/unit-1.0.0 + filename: + description: list of filenames + type: array + items: + type: string + raoffset: + description: list of ra offsets + type: array + items: + type: number + decoffset: + descrition: list of dec offsets + type: array + items: + type: number -required: [meta, offsets] +required: [filename, raoffset, decoffset, units] From e20bd6a4adf62734d209a6037a49d8db07ea96bc Mon Sep 17 00:00:00 2001 From: jemorrison Date: Mon, 9 Sep 2024 07:00:56 -0700 Subject: [PATCH 12/39] fix conflict --- jwst/cube_build/cube_build_step.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/jwst/cube_build/cube_build_step.py b/jwst/cube_build/cube_build_step.py index a9e121efc5..4b2d6133c3 100755 --- a/jwst/cube_build/cube_build_step.py +++ b/jwst/cube_build/cube_build_step.py @@ -4,7 +4,6 @@ import time from jwst.datamodels import ModelContainer from jwst.lib.pipe_utils import match_nans_and_flags - from . import cube_build from . import ifu_cube from . import data_types @@ -237,7 +236,6 @@ def process(self, input): self.output_type = 'channel' self.pars_input['output_type'] = self.output_type self.log.info(f'Setting output type to: {self.output_type}') - # ________________________________________________________________________________ # If an offset file is provided do some basic checks on the file and its contents. # The offset list contains a matching list to the files in the association From 798a5f71c8dfb30abb24d5d6830cdb2c319464bc Mon Sep 17 00:00:00 2001 From: jemorrison Date: Mon, 9 Sep 2024 11:15:44 -0700 Subject: [PATCH 13/39] added more checks for offset file --- jwst/cube_build/cube_build_step.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/jwst/cube_build/cube_build_step.py b/jwst/cube_build/cube_build_step.py index 4b2d6133c3..069af6bdbc 100755 --- a/jwst/cube_build/cube_build_step.py +++ b/jwst/cube_build/cube_build_step.py @@ -559,7 +559,12 @@ def check_offset_file(self): # validate the offset file using the schema file DATA_PATH = Path(__file__).parent - af = asdf.open(self.offset_file, custom_schema=DATA_PATH/'ifuoffset.schema.yaml') + try: + af = asdf.open(self.offset_file, custom_schema=DATA_PATH/'ifuoffset.schema.yaml') + except + self.log.error('Validation Error for offset file') + self.log.error('Turning off adjusting by offsets') + return None offset_filename = af['filename'] offset_ra = af['raoffset'] From a503c29af04e7782f4eaa17fb7469230abef0721 Mon Sep 17 00:00:00 2001 From: jemorrison Date: Mon, 9 Sep 2024 12:53:52 -0700 Subject: [PATCH 14/39] fix typo --- jwst/cube_build/cube_build_step.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/jwst/cube_build/cube_build_step.py b/jwst/cube_build/cube_build_step.py index 069af6bdbc..d5f36cdefb 100755 --- a/jwst/cube_build/cube_build_step.py +++ b/jwst/cube_build/cube_build_step.py @@ -561,7 +561,7 @@ def check_offset_file(self): DATA_PATH = Path(__file__).parent try: af = asdf.open(self.offset_file, custom_schema=DATA_PATH/'ifuoffset.schema.yaml') - except + except: self.log.error('Validation Error for offset file') self.log.error('Turning off adjusting by offsets') return 
None From 0e7cc1b7882f1848229d7d7b7aa4bc9188b893d2 Mon Sep 17 00:00:00 2001 From: jemorrison Date: Mon, 9 Sep 2024 14:43:17 -0700 Subject: [PATCH 15/39] attempt on unit test --- jwst/cube_build/tests/test_configuration.py | 78 ++++++++++++++++++++- 1 file changed, 77 insertions(+), 1 deletion(-) diff --git a/jwst/cube_build/tests/test_configuration.py b/jwst/cube_build/tests/test_configuration.py index e3783c1ca8..9619219e28 100644 --- a/jwst/cube_build/tests/test_configuration.py +++ b/jwst/cube_build/tests/test_configuration.py @@ -3,12 +3,15 @@ """ import pytest - +import asdf from stdatamodels.jwst import datamodels +import astropy.units as u +from jwst.cube_build import CubeBuildStep from jwst.cube_build import cube_build from jwst.cube_build import file_table + wcsinfo = { 'dec_ref': -0.00244536159612126, 'ra_ref': -0.00205553321270217, @@ -100,6 +103,23 @@ } +@pytest.fixture(scope='function') +def offset_file(): + """ Generate a offset file """ + + filename = ['test1.fits', 'test2.fits'] + raoffset = [0.0, 0.1] + decoffset = [0.0, 0.15] + tree = { + "units": str(u.arcsec), + "filename": filename, + "raoffset": raoffset, + "decoffset": decoffset + } + af = asdf.AsdfFile(tree) + return af + + @pytest.fixture(scope='function') def miri_ifushort_short(): """ Generate a IFU image """ @@ -113,6 +133,30 @@ def miri_ifushort_short(): return input_model +@pytest.fixture(scope='function') +def miri_ifushort_short_2files(): + """ Generate a IFU image """ + + input_model1 = datamodels.IFUImageModel() + input_model1.meta.wcsinfo._instance.update(wcsinfo) + input_model1.meta.instrument._instance.update(mirifushort_short) + input_model1.meta.observation._instance.update(observation) + input_model1.meta.subarray._instance.update(subarray) + input_model1.meta.cal_step.assign_wcs = 'COMPLETE' + + input_model2 = datamodels.IFUImageModel() + input_model2.meta.wcsinfo._instance.update(wcsinfo) + input_model2.meta.instrument._instance.update(mirifushort_short) + input_model2.meta.observation._instance.update(observation) + input_model2.meta.subarray._instance.update(subarray) + input_model2.meta.cal_step.assign_wcs = 'COMPLETE' + + input_models = [] + input_models.append(input_model1) + input_models.append(input_model1) + return input_models + + @pytest.fixture(scope='function') def miri_full_coverage(): """ Generate a IFU images SHORT, LONG for all three bands """ @@ -467,3 +511,35 @@ def test_calspec3_config_nirspec_multi(tmp_cwd, nirspec_medium_coverage): assert cube_pars['1']['par1'] == ['g140m', 'g235m'] assert cube_pars['1']['par2'] == ['f100lp', 'f170lp'] + + +def test_offset_file_config(tmp_cwd, miri_ifushort_short_2files, offset_file): + """ Test validation of the offset configuration""" + + pars_input = {} + pars_input['channel'] = [] + pars_input['subchannel'] = [] + pars_input['filter'] = [] + pars_input['grating'] = [] + output_type = 'band' + weighting = 'drizzle' + par_filename = 'None' + + pars = { + 'channel': pars_input['channel'], + 'subchannel': pars_input['subchannel'], + 'grating': pars_input['grating'], + 'filter': pars_input['filter'], + 'weighting': weighting, + 'output_type': output_type} + + cubeinfo = cube_build.CubeData( + miri_ifushort_short_2files, + par_filename, + **pars) + + offsets = CubeBuildStep.check_offset_file(cubeinfo.input_models) + + + # want to test that offsets is None with it fails or Dictionary when it works + From 9446cadfcaff919842ec5801feedea1462555302 Mon Sep 17 00:00:00 2001 From: jemorrison Date: Wed, 11 Sep 2024 19:05:52 -0700 Subject: [PATCH 16/39] 
added a unit test --- jwst/cube_build/cube_build_step.py | 2 + jwst/cube_build/tests/test_configuration.py | 87 ++++++++++++++------- 2 files changed, 62 insertions(+), 27 deletions(-) diff --git a/jwst/cube_build/cube_build_step.py b/jwst/cube_build/cube_build_step.py index d5f36cdefb..7a977d15b0 100755 --- a/jwst/cube_build/cube_build_step.py +++ b/jwst/cube_build/cube_build_step.py @@ -559,6 +559,7 @@ def check_offset_file(self): # validate the offset file using the schema file DATA_PATH = Path(__file__).parent + try: af = asdf.open(self.offset_file, custom_schema=DATA_PATH/'ifuoffset.schema.yaml') except: @@ -589,4 +590,5 @@ def check_offset_file(self): offsets['raoffset'] = offset_ra offsets['decoffset'] = offset_dec + af.close() return offsets diff --git a/jwst/cube_build/tests/test_configuration.py b/jwst/cube_build/tests/test_configuration.py index 9619219e28..d155b7ac82 100644 --- a/jwst/cube_build/tests/test_configuration.py +++ b/jwst/cube_build/tests/test_configuration.py @@ -103,21 +103,46 @@ } -@pytest.fixture(scope='function') -def offset_file(): +@pytest.fixture(scope='module') +def offset_file(tmp_path_factory): """ Generate a offset file """ - filename = ['test1.fits', 'test2.fits'] + filename = tmp_path_factory.mktemp('offset') + filename = filename / 'offset.asdf' + + testfile = ['test1.fits', 'test2.fits'] raoffset = [0.0, 0.1] decoffset = [0.0, 0.15] tree = { "units": str(u.arcsec), - "filename": filename, + "filename": testfile, "raoffset": raoffset, "decoffset": decoffset } af = asdf.AsdfFile(tree) - return af + af.write_to(filename) + return filename + + +@pytest.fixture(scope='module') +def offset_file_arcmin(tmp_path_factory): + """ Generate a offset file with units = arcmin """ + + filename = tmp_path_factory.mktemp('offset_arcmin') + filename = filename / 'offset_arcmin.asdf' + + testfile = ['test1.fits', 'test2.fits'] + raoffset = [0.0, 0.1] + decoffset = [0.0, 0.15] + tree = { + "units": str(u.arcmin), + "filename": testfile, + "raoffset": raoffset, + "decoffset": decoffset + } + af = asdf.AsdfFile(tree) + af.write_to(filename) + return filename @pytest.fixture(scope='function') @@ -143,6 +168,7 @@ def miri_ifushort_short_2files(): input_model1.meta.observation._instance.update(observation) input_model1.meta.subarray._instance.update(subarray) input_model1.meta.cal_step.assign_wcs = 'COMPLETE' + input_model1.meta.filename = 'test1.fits' input_model2 = datamodels.IFUImageModel() input_model2.meta.wcsinfo._instance.update(wcsinfo) @@ -150,10 +176,11 @@ def miri_ifushort_short_2files(): input_model2.meta.observation._instance.update(observation) input_model2.meta.subarray._instance.update(subarray) input_model2.meta.cal_step.assign_wcs = 'COMPLETE' + input_model2.meta.filename = 'test2.fits' input_models = [] input_models.append(input_model1) - input_models.append(input_model1) + input_models.append(input_model2) return input_models @@ -516,30 +543,36 @@ def test_calspec3_config_nirspec_multi(tmp_cwd, nirspec_medium_coverage): def test_offset_file_config(tmp_cwd, miri_ifushort_short_2files, offset_file): """ Test validation of the offset configuration""" - pars_input = {} - pars_input['channel'] = [] - pars_input['subchannel'] = [] - pars_input['filter'] = [] - pars_input['grating'] = [] - output_type = 'band' - weighting = 'drizzle' - par_filename = 'None' + # first test that it is a valid asdf file and has what is needed + step = CubeBuildStep() + step.input_models = miri_ifushort_short_2files + + step.offset_file = offset_file + offsets = 
step.check_offset_file() + assert isinstance(offsets, dict) - pars = { - 'channel': pars_input['channel'], - 'subchannel': pars_input['subchannel'], - 'grating': pars_input['grating'], - 'filter': pars_input['filter'], - 'weighting': weighting, - 'output_type': output_type} +def test2_offset_file_config(tmp_cwd, miri_ifushort_short_2files, offset_file): + """ Test validation of the offset configuration""" - cubeinfo = cube_build.CubeData( - miri_ifushort_short_2files, - par_filename, - **pars) + # Test changing one of the filenames so it is not in the list given + # in the offset_file + step = CubeBuildStep() + step.input_models = miri_ifushort_short_2files + + miri_ifushort_short_2files[0].meta.filename = 'test3.fits' + step.offset_file = offset_file + offsets = step.check_offset_file() + assert offsets is None - offsets = CubeBuildStep.check_offset_file(cubeinfo.input_models) +def test_offset_file_config2(tmp_cwd, miri_ifushort_short_2files, offset_file_arcmin): + """ Test validation of the offset configuration""" - # want to test that offsets is None with it fails or Dictionary when it works + # test is the if the user set the units to arcmins + step = CubeBuildStep() + step.input_models = miri_ifushort_short_2files + + step.offset_file = offset_file_arcmin + offsets = step.check_offset_file() + assert offsets is None From 0fd9af7c641021624a9e5ece29ef9789c7045343 Mon Sep 17 00:00:00 2001 From: jemorrison Date: Thu, 12 Sep 2024 06:29:54 -0700 Subject: [PATCH 17/39] fix api for nirspec --- jwst/cube_build/ifu_cube.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/jwst/cube_build/ifu_cube.py b/jwst/cube_build/ifu_cube.py index 1033c7a26c..9545cf4451 100644 --- a/jwst/cube_build/ifu_cube.py +++ b/jwst/cube_build/ifu_cube.py @@ -1866,7 +1866,7 @@ def map_miri_pixel_to_sky(self, input_model, this_par1, subtract_background, return sky_result # ______________________________________________________________________ - def map_nirspec_pixel_to_sky(self, input_model): + def map_nirspec_pixel_to_sky(self, input_model, offsets): """Loop over a file and map the detector pixels to the output cube From 407c21653420fb595ef4dc41e75f43b2c196dd4d Mon Sep 17 00:00:00 2001 From: jemorrison Date: Thu, 12 Sep 2024 10:35:35 -0700 Subject: [PATCH 18/39] added closing asdf file --- jwst/cube_build/cube_build_step.py | 3 +++ jwst/cube_build/tests/test_configuration.py | 3 ++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/jwst/cube_build/cube_build_step.py b/jwst/cube_build/cube_build_step.py index 7a977d15b0..08f00d2d52 100755 --- a/jwst/cube_build/cube_build_step.py +++ b/jwst/cube_build/cube_build_step.py @@ -567,6 +567,7 @@ def check_offset_file(self): self.log.error('Turning off adjusting by offsets') return None + offset_filename = af['filename'] offset_ra = af['raoffset'] offset_dec = af['decoffset'] @@ -575,6 +576,7 @@ def check_offset_file(self): if offset_unit != 'arcsec': self.log.error('Provide the offset units in units of arcsec ') self.log.error('Turning off adjusting by offsets ') + af.close() return None for model in self.input_models: @@ -584,6 +586,7 @@ def check_offset_file(self): else: self.log.error('File in assocation is not found in offset list list %s', file_check) self.log.error('Turning off adjusting by offsets') + af.close() return None offsets = {} offsets['filename'] = offset_filename diff --git a/jwst/cube_build/tests/test_configuration.py b/jwst/cube_build/tests/test_configuration.py index d155b7ac82..0ef5ab1e2e 100644 --- 
a/jwst/cube_build/tests/test_configuration.py +++ b/jwst/cube_build/tests/test_configuration.py @@ -121,6 +121,7 @@ def offset_file(tmp_path_factory): } af = asdf.AsdfFile(tree) af.write_to(filename) + af.close() return filename @@ -128,7 +129,7 @@ def offset_file(tmp_path_factory): def offset_file_arcmin(tmp_path_factory): """ Generate a offset file with units = arcmin """ - filename = tmp_path_factory.mktemp('offset_arcmin') + filename = tmp_path_factory.mktemp('offset') filename = filename / 'offset_arcmin.asdf' testfile = ['test1.fits', 'test2.fits'] From a27cdaaeb2e1a4ab5c49d6a6b2a94a0d8e757caf Mon Sep 17 00:00:00 2001 From: jemorrison Date: Thu, 12 Sep 2024 13:12:39 -0700 Subject: [PATCH 19/39] Update Change Log --- CHANGES.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGES.rst b/CHANGES.rst index db515eadf3..32b9ab706d 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -82,6 +82,8 @@ cube_build - Replaced deep copies of NIRSpec WCS objects within most loops. [#8793] +- Allow the user to provide ra and dec shifts to apply for each file to fine tune the WCS. [#JP-3364] + datamodels ---------- From c3dbb3de750fae436919652b58d47bfb598b8295 Mon Sep 17 00:00:00 2001 From: jemorrison Date: Thu, 12 Sep 2024 17:28:36 -0700 Subject: [PATCH 20/39] updates to docs --- CHANGES.rst | 3 ++- docs/jwst/cube_build/arguments.rst | 17 +++++++++-------- 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 32b9ab706d..4e8212be57 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -82,7 +82,8 @@ cube_build - Replaced deep copies of NIRSpec WCS objects within most loops. [#8793] -- Allow the user to provide ra and dec shifts to apply for each file to fine tune the WCS. [#JP-3364] +- Allow the user to provide ra and dec shifts to apply for each file to fine + tune the WCS. [#JP-3364] datamodels ---------- diff --git a/docs/jwst/cube_build/arguments.rst b/docs/jwst/cube_build/arguments.rst index 4cfcc95ed1..633cdd68f8 100644 --- a/docs/jwst/cube_build/arguments.rst +++ b/docs/jwst/cube_build/arguments.rst @@ -91,7 +91,7 @@ The following arguments control the size and sampling characteristics of the out ``nspax_y`` The odd integer number of spaxels to use in the y dimension of the tangent plane. -``offset_list [string]`` +``offset_file [string]`` The string contains the name of the file holding ra and dec offsets to apply to each input file. This file must be an asdf file and the it has a specific format. It is assumed the user has determined the ra and dec offsets to apply to the data. For details on how to construct the format of the offset file see @@ -138,19 +138,19 @@ Creating an offset file The offset file is an ASDF formated file :``_ stands for "Advanced Scientific Data. For each input file in the spec3 assocation used to build the IFU cubes, the offset files needs to have a corresponding right ascension and declination offset given arc seconds. Below is an example of how to make an ASDF offset file. It is assumed the user has determined the -offsets to apply to the data in each file. The offsets are stored in a python dictionary, `offsets`. The items of this dictionary -are `filenames`, `raoffset` and `decoffset`. The IFU cube building code expects this dictionary to hold the information -for storing the file names and the associated ra and dec offsets. The file names should not contain the directory path. +offsets to apply to the data in each file. The offsets information is stored in three lists: + `filenames`, `raoffset` and `decoffset`. 
The units of the ra and dec offsets + are required to be in the offset set file and only the unit, `arcsec`, is allowed. The file names should +not contain the directory path. The offset asdf filename can be any name, but it must have the `asdf` extension. -It is assumed there exists a list of files, ra and dec offsets that are feed to this method. The ra and dec offsets need to be -in arcseconds. The cube building code will apply the ra offsets after dividing by cos(crval2), where crval2 is the -declination center of the IFU cube. The offset asdf filename can be any name, but it must have the `asdf` extension. Below `num` is the number of files. .. code-block:: python import asdf + import astropy.units as u + filename = [] raoffset = [] decoffset = [] @@ -166,6 +166,7 @@ Below `num` is the number of files. "decoffset": decoffset } af = asdf.AsdfFile(tree) - af.write_to(input_dir + 'offsets.asdf') + af.write_to(input_dir + 'offsets.asdf') + af.close() From d9183228d259fa9628f240c589a7e166b714fcf5 Mon Sep 17 00:00:00 2001 From: jemorrison Date: Fri, 13 Sep 2024 08:28:59 -0700 Subject: [PATCH 21/39] update docs --- docs/jwst/cube_build/arguments.rst | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/docs/jwst/cube_build/arguments.rst b/docs/jwst/cube_build/arguments.rst index 633cdd68f8..abd08926b1 100644 --- a/docs/jwst/cube_build/arguments.rst +++ b/docs/jwst/cube_build/arguments.rst @@ -136,13 +136,12 @@ Creating an offset file ----------------------- The offset file is an ASDF formated file :``_ stands for "Advanced Scientific Data. For each -input file in the spec3 assocation used to build the IFU cubes, the offset files needs to have a corresponding right ascension and declination offset given arc seconds. +input file in the spec3 assocation used to build the IFU cubes, the offset files needs to have a corresponding right ascension and declination offset given arc seconds. Below is an example of how to make an ASDF offset file. It is assumed the user has determined the offsets to apply to the data in each file. The offsets information is stored in three lists: - `filenames`, `raoffset` and `decoffset`. The units of the ra and dec offsets - are required to be in the offset set file and only the unit, `arcsec`, is allowed. The file names should +`filenames`, `raoffset` and `decoffset`. The units of the ra and dec offsets +are required to be in the offset file and only the unit, `arcsec`, is allowed. The file names should not contain the directory path. The offset asdf filename can be any name, but it must have the `asdf` extension. - Below `num` is the number of files. 
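
For intuition on the offset arithmetic in the docs above: dividing the RA offset by cos(dec) and adding it to RA is, for small shifts, equivalent to applying the offsets as true on-sky angles, which is what `SkyCoord.spherical_offsets_by` does. A minimal sketch, assuming astropy is installed; the pointing and offset values below are made up for illustration:

.. code-block:: python

    import numpy as np
    from astropy import units as u
    from astropy.coordinates import SkyCoord

    ra, dec = 339.8149, 39.0504      # hypothetical pointing, degrees
    raoffset = 0.1 * u.arcsec        # on-sky RA offset
    decoffset = 0.15 * u.arcsec

    # spherical_offsets_by treats the offsets as true angular separations,
    # so the cos(dec) compression of RA is handled internally.
    shifted = SkyCoord(ra, dec, unit='deg').spherical_offsets_by(raoffset, decoffset)

    # The manual approximation described in the docs: divide the RA offset
    # by cos(dec) before adding it, and add the Dec offset directly.
    ra_manual = ra + raoffset.to_value(u.deg) / np.cos(np.deg2rad(dec))
    dec_manual = dec + decoffset.to_value(u.deg)

    # For sub-arcsecond offsets the two agree to well below a microarcsecond.
    assert abs(shifted.ra.deg - ra_manual) * 3600 < 1e-6
    assert abs(shifted.dec.deg - dec_manual) * 3600 < 1e-6

Later patches in this series (PATCH 26 onward) switch the pipeline code itself to `spherical_offsets_by`, which removes the need to carry a separate median-declination correction.
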
From 7cba5e4fc4ac84cbdd9fc116e58f7881449e4c6d Mon Sep 17 00:00:00 2001 From: jemorrison Date: Fri, 13 Sep 2024 08:56:26 -0700 Subject: [PATCH 22/39] fixed nirspec offset issue --- jwst/cube_build/ifu_cube.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/jwst/cube_build/ifu_cube.py b/jwst/cube_build/ifu_cube.py index 9545cf4451..56e5b72f4c 100644 --- a/jwst/cube_build/ifu_cube.py +++ b/jwst/cube_build/ifu_cube.py @@ -1560,7 +1560,7 @@ def map_detector_to_outputframe(self, this_par1, (x, y, ra, dec, wave_all, slice_no_all, dwave_all, corner_coord_all) = sky_result elif self.instrument == 'NIRSPEC': - sky_result = self.map_nirspec_pixel_to_sky(input_model) + sky_result = self.map_nirspec_pixel_to_sky(input_model, offsets) (x, y, ra, dec, wave_all, slice_no_all, dwave_all, corner_coord_all) = sky_result # ______________________________________________________________________________ From add9f8b31f0942ead094b217962d4ca291bdefcd Mon Sep 17 00:00:00 2001 From: jemorrison Date: Fri, 13 Sep 2024 09:12:39 -0700 Subject: [PATCH 23/39] CHANGES.rst --- CHANGES.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index 4e8212be57..bc92289e1e 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -83,7 +83,7 @@ cube_build - Replaced deep copies of NIRSpec WCS objects within most loops. [#8793] - Allow the user to provide ra and dec shifts to apply for each file to fine - tune the WCS. [#JP-3364] + tune the WCS. [#8720] datamodels ---------- From 30370f6d5a8ca18cec74206ca7b239d0f11f6d31 Mon Sep 17 00:00:00 2001 From: jemorrison Date: Fri, 13 Sep 2024 20:18:51 -0700 Subject: [PATCH 24/39] update docs --- docs/jwst/cube_build/arguments.rst | 40 ++++++++++++++++++++++++------ 1 file changed, 32 insertions(+), 8 deletions(-) diff --git a/docs/jwst/cube_build/arguments.rst b/docs/jwst/cube_build/arguments.rst index abd08926b1..88e0e2c8dd 100644 --- a/docs/jwst/cube_build/arguments.rst +++ b/docs/jwst/cube_build/arguments.rst @@ -92,9 +92,8 @@ The following arguments control the size and sampling characteristics of the out The odd integer number of spaxels to use in the y dimension of the tangent plane. ``offset_file [string]`` - The string contains the name of the file holding ra and dec offsets to apply to each input file. This file - must be an asdf file and the it has a specific format. It is assumed the user has determined the ra and dec - offsets to apply to the data. For details on how to construct the format of the offset file see + The string contains the name of the file holding RA and Dec offsets to apply to each input file. This file + must be an asdf file with a specific format. For details on how to construct the offset file see :ref:`offsets`. @@ -135,13 +134,15 @@ A parameter only used for investigating which detector pixels contributed to a c Creating an offset file ----------------------- -The offset file is an ASDF formated file :``_ stands for "Advanced Scientific Data. For each -input file in the spec3 assocation used to build the IFU cubes, the offset files needs to have a corresponding right ascension and declination offset given arc seconds. +The offset file is an ASDF formatted file :``_ stands for "Advanced Scientific Data. +For each input file in the spec3 association used to build the IFU cubes, the offset file needs to have a +corresponding right ascension and declination offset given in arc seconds. + Below is an example of how to make an ASDF offset file. 
It is assumed the user has determined the offsets to apply to the data in each file. The offsets information is stored in three lists: -`filenames`, `raoffset` and `decoffset`. The units of the ra and dec offsets +`filenames`, `raoffset` and `decoffset`. The units of the Ra and Dec offsets are required to be in the offset file and only the unit, `arcsec`, is allowed. The file names should -not contain the directory path. The offset asdf filename can be any name, but it must have the `asdf` extension. +not contain the directory path. The offset file can have any name, but it must have the `asdf` extension. Below `num` is the number of files. @@ -165,7 +166,30 @@ Below `num` is the number of files. "decoffset": decoffset } af = asdf.AsdfFile(tree) - af.write_to(input_dir + 'offsets.asdf') + af.write_to('offsets.asdf') af.close() +Or lets say there a small number of files in the assocations. The filename, raoffset and decoffset can be set +in the code. For example, if there are three files in the assocation the offset file can be created as follows: + +.. code-block:: python + + import asdf + import astropy.units as u + + filename = ['file1.fits', 'file2.fits', 'file3.fits'] + raoffset = [0.0, -1.0, 1.0] + decoffset = [0.0, 1.0, -1.0] + + tree = { + "units": str(u.arcsec), + "filename": filename, + "raoffset": raoffset, + "decoffset": decoffset + } + af = asdf.AsdfFile(tree) + af.write_to('offsets.asdf') + af.close() + + From c356158df3e11bbc480f3b4c62c0ef2e1c8802c1 Mon Sep 17 00:00:00 2001 From: jemorrison Date: Fri, 13 Sep 2024 20:22:36 -0700 Subject: [PATCH 25/39] update docs --- docs/jwst/cube_build/arguments.rst | 31 +++++++++++++++--------------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/docs/jwst/cube_build/arguments.rst b/docs/jwst/cube_build/arguments.rst index 88e0e2c8dd..b66b53b419 100644 --- a/docs/jwst/cube_build/arguments.rst +++ b/docs/jwst/cube_build/arguments.rst @@ -143,21 +143,16 @@ offsets to apply to the data in each file. The offsets information is stored in `filenames`, `raoffset` and `decoffset`. The units of the Ra and Dec offsets are required to be in the offset file and only the unit, `arcsec`, is allowed. The file names should not contain the directory path. The offset file can have any name, but it must have the `asdf` extension. -Below `num` is the number of files. - +An example of making an offset file for an association containing three files is: .. code-block:: python import asdf import astropy.units as u - filename = [] - raoffset = [] - decoffset = [] - for i in range(num): - filename.append(file[i]) - raoffset.append(ra_center1[i]) - decoffset.append(dec_center1[i]) + filename = ['file1.fits', 'file2.fits', 'file3.fits'] + raoffset = [0.0, -1.0, 1.0] + decoffset = [0.0, 1.0, -1.0] tree = { "units": str(u.arcsec), @@ -168,19 +163,25 @@ Below `num` is the number of files. af = asdf.AsdfFile(tree) af.write_to('offsets.asdf') af.close() + + -Or lets say there a small number of files in the assocations. The filename, raoffset and decoffset can be set -in the code. For example, if there are three files in the assocation the offset file can be created as follows: +An exmaple of making an offset file for `num` files is + .. 
code-block:: python import asdf import astropy.units as u - filename = ['file1.fits', 'file2.fits', 'file3.fits'] - raoffset = [0.0, -1.0, 1.0] - decoffset = [0.0, 1.0, -1.0] + filename = [] + raoffset = [] + decoffset = [] + for i in range(num): + filename.append(file[i]) + raoffset.append(ra_center1[i]) + decoffset.append(dec_center1[i]) tree = { "units": str(u.arcsec), @@ -191,5 +192,5 @@ in the code. For example, if there are three files in the assocation the offset af = asdf.AsdfFile(tree) af.write_to('offsets.asdf') af.close() - + From cc66ae9e2fb3303c77e4325fd85e16f9a0d87e8d Mon Sep 17 00:00:00 2001 From: jemorrison Date: Mon, 16 Sep 2024 15:37:46 -0700 Subject: [PATCH 26/39] changed how ra offset values are determined --- docs/jwst/cube_build/arguments.rst | 48 ++-- jwst/cube_build/cube_build_step.py | 21 +- jwst/cube_build/ifu_cube.py | 158 +++++++---- jwst/cube_build/tests/test_configuration.py | 109 -------- jwst/cube_build/tests/test_offset.py | 290 ++++++++++++++++++++ 5 files changed, 450 insertions(+), 176 deletions(-) create mode 100644 jwst/cube_build/tests/test_offset.py diff --git a/docs/jwst/cube_build/arguments.rst b/docs/jwst/cube_build/arguments.rst index b66b53b419..74a1f72e46 100644 --- a/docs/jwst/cube_build/arguments.rst +++ b/docs/jwst/cube_build/arguments.rst @@ -167,30 +167,44 @@ An example of making an offset file for an association containing three files is -An exmaple of making an offset file for `num` files is +An example of making an offset file for `num` files +where the user has set up list called `file` containing the `num` filenames. +The cooresponding Ra and Dec offsets, both containing num values, are stored in lists called, +`ra_offset` and `dec_offset` .. code-block:: python import asdf import astropy.units as u - - filename = [] - raoffset = [] - decoffset = [] - for i in range(num): - filename.append(file[i]) - raoffset.append(ra_center1[i]) - decoffset.append(dec_center1[i]) - - tree = { - "units": str(u.arcsec), - "filename": filename, - "raoffset": raoffset, - "decoffset": decoffset + def create_offset_asdf(files, ra_offset, dec_offset): + + filename = [] + raoffset = [] + decoffset = [] + num = len(files) + for i in range(num): + filename.append(files[i]) + raoffset.append(ra_offset[i]) + decoffset.append(dec_offset[i]) + + tree = { + "units": str(u.arcsec), + "filename": filename, + "raoffset": raoffset, + "decoffset": decoffset } af = asdf.AsdfFile(tree) - af.write_to('offsets.asdf') - af.close() + af.write_to( 'offsets.asdf') +Set up the lists and call the above function: + +.. code-block:: python + + files = ['test1.fits', 'test2.fits', 'test3.fits'] + ra_offset = [0.1, 0.12, 0.13] + dec_offset = [0.14, 0.15, 0.16] + create_offset_asdf(files, ra_offset, dec_offset) + + diff --git a/jwst/cube_build/cube_build_step.py b/jwst/cube_build/cube_build_step.py index 08f00d2d52..3341b6ba18 100755 --- a/jwst/cube_build/cube_build_step.py +++ b/jwst/cube_build/cube_build_step.py @@ -243,6 +243,7 @@ def process(self, input): # Each row in the offset list contain a filename, ra offset and dec offset. # The offset list is an asdf file. 
self.offsets = None + if self.offset_file is not None: offsets = self.check_offset_file() if offsets is not None: @@ -578,7 +579,8 @@ def check_offset_file(self): self.log.error('Turning off adjusting by offsets ') af.close() return None - + + # check that all the file names in input_model are in the offset filename for model in self.input_models: file_check = model.meta.filename if file_check in offset_filename: @@ -588,10 +590,25 @@ def check_offset_file(self): self.log.error('Turning off adjusting by offsets') af.close() return None + # check that all the lists have the same length + len_file = len(offset_filename) + len_ra = len(offset_ra) + len_dec = len(offset_dec) + if (len_file != len_ra or len_ra != len_dec or len_file != len_dec): + self.log.error('The offset file does not have the same number of values for filename, offset_ra, offset_dec') + self.log.error('Turning off adjusting by offsets') + af.close() + return None + + # The offset file has passed tests so set the offset dictionary offsets = {} offsets['filename'] = offset_filename offsets['raoffset'] = offset_ra offsets['decoffset'] = offset_dec - + n = len(offsets['raoffset']) + # convert to degrees + for i in range(n): + offsets['raoffset'][i] = offsets['raoffset'][i]/3600.0 + offsets['decoffset'][i] = offsets['decoffset'][i]/3600.0 af.close() return offsets diff --git a/jwst/cube_build/ifu_cube.py b/jwst/cube_build/ifu_cube.py index 56e5b72f4c..e325424407 100644 --- a/jwst/cube_build/ifu_cube.py +++ b/jwst/cube_build/ifu_cube.py @@ -14,6 +14,8 @@ from stdatamodels.jwst import datamodels from stdatamodels.jwst.datamodels import dqflags from stdatamodels.jwst.transforms.models import _toindex +from astropy import units +from astropy.coordinates import SkyCoord from ..model_blender import blendmeta from ..assign_wcs import pointing @@ -602,7 +604,8 @@ def build_ifucube(self): # ________________________________________________________________________________ # loop over the files that cover the spectral range the cube is for - input_model = datamodels.open(input) + #input_model = datamodels.open(input) + input_model = input self.input_models_this_cube.append(input_model.copy()) # set up input_model to be first file used to copy in basic header info # to ifucube meta data @@ -1235,7 +1238,6 @@ def setup_ifucube_wcs(self): # _____________________________________________________________________________ self.cdelt1 = self.spatial_size self.cdelt2 = self.spatial_size - deg2rad = math.pi / 180.0 if self.linear_wavelength: self.cdelt3 = self.spectral_size @@ -1315,7 +1317,6 @@ def setup_ifucube_wcs(self): # find the median center declination if we have an offset file if self.offsets is not None: - decs = [] for k in range(n): input_file = self.master_table.FileMap[self.instrument][this_a][this_b][k] input_model = datamodels.open(input_file) @@ -1325,17 +1326,7 @@ def setup_ifucube_wcs(self): cb2 = float(s[6]) cb3 = float(s[8]) cb4 = float(s[10]) - m = (cb1 + cb2 + cb3 + cb4)/4 - decs.append(m) - - self.median_dec = np.nanmedian(decs) - # fold in the median_dec information into ra offset - noffsets = len(self.offsets['raoffset']) - # convert ra and dec offsets to degrees and adjust ra offset for cos(dec) - for im in range(noffsets): - self.offsets['raoffset'][im] = (self.offsets['raoffset'][im]/3600.0)/np.cos(self.median_dec*deg2rad) - self.offsets['decoffset'][im] = self.offsets['decoffset'][im]/3600.0 - + for k in range(n): lmin = 0.0 lmax = 0.0 @@ -1408,15 +1399,38 @@ def setup_ifucube_wcs(self): ca1, cb1, ca2, cb2, ca3, cb3, ca4, 
cb4, lmin, lmax = ch_corners # now append this model spatial and spectral corner - corner_a.append(ca1 + raoffset) - corner_a.append(ca2 + raoffset) - corner_a.append(ca3 + raoffset) - corner_a.append(ca4 + raoffset) - - corner_b.append(cb1 + decoffset) - corner_b.append(cb2 + decoffset) - corner_b.append(cb3 + decoffset) - corner_b.append(cb4 + decoffset) + if self.offsets is not None: + c1 = SkyCoord(ca1, cb1, unit='deg') + c2 = SkyCoord(ca2, cb2, unit='deg') + c3 = SkyCoord(ca3, cb3, unit='deg') + c4 = SkyCoord(ca4, cb4, unit='deg') + raoffset = raoffset* units.deg + decoffset = decoffset* units.deg + + c1_new = c1.spherical_offsets_by(raoffset, decoffset) + c2_new = c2.spherical_offsets_by(raoffset, decoffset) + c3_new = c3.spherical_offsets_by(raoffset, decoffset) + c4_new = c4.spherical_offsets_by(raoffset, decoffset) + ca1 = c1_new.ra.value + cb1 = c1_new.dec.value + + ca2 = c2_new.ra.value + cb2 = c2_new.dec.value + + ca3 = c3_new.ra.value + cb3 = c3_new.dec.value + + ca4 = c4_new.ra.value + cb4 = c4_new.dec.value + corner_a.append(ca1) + corner_a.append(ca2) + corner_a.append(ca3) + corner_a.append(ca4) + + corner_b.append(cb1) + corner_b.append(cb2) + corner_b.append(cb3) + corner_b.append(cb4) lambda_min.append(lmin) lambda_max.append(lmax) @@ -1760,7 +1774,6 @@ def map_miri_pixel_to_sky(self, input_model, this_par1, subtract_background, slice_no = None # Slice number dwave = None corner_coord = None - deg2rad = math.pi / 180.0 raoffset = 0.0 decoffset = 0.0 @@ -1771,9 +1784,10 @@ def map_miri_pixel_to_sky(self, input_model, this_par1, subtract_background, raoffset = offsets['raoffset'][index] decoffset = offsets['decoffset'][index] log.info("Ra and dec offset (arc seconds) applied to file :%8.6f, %8.6f, %s", - raoffset*3600.0*np.cos(self.median_dec*deg2rad), + raoffset*3600.0, decoffset*3600.0, filename) - + raoffset = raoffset* units.deg + decoffset = decoffset* units.deg # check if background sky matching as been done in mrs_imatch step # If it has not been subtracted and the background has not been # subtracted - subtract it. 
@@ -1809,8 +1823,13 @@ def map_miri_pixel_to_sky(self, input_model, this_par1, subtract_background, # if self.coord_system == 'skyalign' or self.coord_system == 'ifualign': ra, dec, wave = input_model.meta.wcs(x, y) - ra = ra + raoffset - dec = dec + decoffset + + if offsets is not None: + c1 = SkyCoord(ra, dec, unit='deg') + c1_new = c1.spherical_offsets_by(raoffset, decoffset) + ra = c1_new.ra.value + dec = c1_new.dec.value + valid1 = ~np.isnan(ra) ra = ra[valid1] dec = dec[valid1] @@ -1850,15 +1869,28 @@ def map_miri_pixel_to_sky(self, input_model, this_par1, subtract_background, input_model.meta.wcs.output_frame, alpha2, beta - dbeta * pixfrac / 2., wave) - ra1 = ra1 + raoffset - ra2 = ra2 + raoffset - ra3 = ra3 + raoffset - ra4 = ra4 + raoffset + if offsets is not None: + c1 = SkyCoord(ra1, dec1, unit='deg') + c2 = SkyCoord(ra2, dec2, unit='deg') + c3 = SkyCoord(ra3, dec3, unit='deg') + c4 = SkyCoord(ra4, dec4, unit='deg') + + c1_new = c1.spherical_offsets_by(raoffset, decoffset) + c2_new = c2.spherical_offsets_by(raoffset, decoffset) + c3_new = c3.spherical_offsets_by(raoffset, decoffset) + c4_new = c4.spherical_offsets_by(raoffset, decoffset) + ra1 = c1_new.ra.value + dec1 = c1_new.dec.value + + ra2 = c2_new.ra.value + dec2 = c2_new.dec.value - dec1 = dec1 + decoffset - dec2 = dec2 + decoffset - dec3 = dec3 + decoffset - dec4 = dec4 + decoffset + ra3 = c3_new.ra.value + dec3 = c3_new.dec.value + + ra4 = c4_new.ra.value + dec4 = c4_new.dec.value + corner_coord = [ra1, dec1, ra2, dec2, ra3, dec3, ra4, dec4] @@ -1895,7 +1927,8 @@ def map_nirspec_pixel_to_sky(self, input_model, offsets): decoffset = offsets['decoffset'][index] log.info("Ra and dec offset (arc seconds) applied to file :%5.2f, %5.2f, %s", raoffset, decoffset, filename) - + raoffset = raoffset* units.deg + decoffset = decoffset* units.deg # initialize the ra,dec, and wavelength arrays # we will loop over slice_nos and fill in values # the flag_det will be set when a slice_no pixel is filled in @@ -2043,21 +2076,50 @@ def map_nirspec_pixel_to_sky(self, input_model, offsets): valid_data = np.where(flag_det == 1) y, x = valid_data - - ra = ra_det[valid_data] + raoffset - dec = dec_det[valid_data] + decoffset + wave = lam_det[valid_data] slice_no = slice_det[valid_data] dwave = dwave_det[valid_data] - ra1 = ra1_det[valid_data] + raoffset - ra2 = ra2_det[valid_data] + raoffset - ra3 = ra3_det[valid_data] + raoffset - ra4 = ra4_det[valid_data] + raoffset - dec1 = dec1_det[valid_data] + decoffset - dec2 = dec2_det[valid_data] + decoffset - dec3 = dec3_det[valid_data] + decoffset - dec4 = dec4_det[valid_data] + decoffset + + ra = ra_det[valid_data] + dec = dec_det[valid_data] + ra1 = ra1_det[valid_data] + ra2 = ra2_det[valid_data] + ra3 = ra3_det[valid_data] + ra4 = ra4_det[valid_data] + dec1 = dec1_det[valid_data] + dec2 = dec2_det[valid_data] + dec3 = dec3_det[valid_data] + dec4 = dec4_det[valid_data] + + if offsets is not None: + c1 = SkyCoord(ra, dec, unit='deg') + + c1_new = c1.spherical_offsets_by(raoffset, decoffset) + ra = c1_new.ra.value + dec = c1_new.dec.value + + c1 = SkyCoord(ra1, dec1, unit='deg') + c2 = SkyCoord(ra2, dec2, unit='deg') + c3 = SkyCoord(ra3, dec3, unit='deg') + c4 = SkyCoord(ra4, dec4, unit='deg') + c1_new = c1.spherical_offsets_by(raoffset, decoffset) + c2_new = c2.spherical_offsets_by(raoffset, decoffset) + c3_new = c3.spherical_offsets_by(raoffset, decoffset) + c4_new = c4.spherical_offsets_by(raoffset, decoffset) + ra1 = c1_new.ra.value + dec1 = c1_new.dec.value + + ra2= c2_new.ra.value + dec2 = 
c2_new.dec.value + + ra3 = c3_new.ra.value + dec3 = c3_new.dec.value + + ra4= c4_new.ra.value + dec4 = c4_new.dec.value + corner_coord = [ra1, dec1, ra2, dec2, ra3, dec3, ra4, dec4] sky_result = (x, y, ra, dec, wave, slice_no, dwave, corner_coord) return sky_result diff --git a/jwst/cube_build/tests/test_configuration.py b/jwst/cube_build/tests/test_configuration.py index 0ef5ab1e2e..9a9eee11db 100644 --- a/jwst/cube_build/tests/test_configuration.py +++ b/jwst/cube_build/tests/test_configuration.py @@ -102,50 +102,6 @@ 'ystart': 1 } - -@pytest.fixture(scope='module') -def offset_file(tmp_path_factory): - """ Generate a offset file """ - - filename = tmp_path_factory.mktemp('offset') - filename = filename / 'offset.asdf' - - testfile = ['test1.fits', 'test2.fits'] - raoffset = [0.0, 0.1] - decoffset = [0.0, 0.15] - tree = { - "units": str(u.arcsec), - "filename": testfile, - "raoffset": raoffset, - "decoffset": decoffset - } - af = asdf.AsdfFile(tree) - af.write_to(filename) - af.close() - return filename - - -@pytest.fixture(scope='module') -def offset_file_arcmin(tmp_path_factory): - """ Generate a offset file with units = arcmin """ - - filename = tmp_path_factory.mktemp('offset') - filename = filename / 'offset_arcmin.asdf' - - testfile = ['test1.fits', 'test2.fits'] - raoffset = [0.0, 0.1] - decoffset = [0.0, 0.15] - tree = { - "units": str(u.arcmin), - "filename": testfile, - "raoffset": raoffset, - "decoffset": decoffset - } - af = asdf.AsdfFile(tree) - af.write_to(filename) - return filename - - @pytest.fixture(scope='function') def miri_ifushort_short(): """ Generate a IFU image """ @@ -158,33 +114,6 @@ def miri_ifushort_short(): input_model.meta.cal_step.assign_wcs = 'COMPLETE' return input_model - -@pytest.fixture(scope='function') -def miri_ifushort_short_2files(): - """ Generate a IFU image """ - - input_model1 = datamodels.IFUImageModel() - input_model1.meta.wcsinfo._instance.update(wcsinfo) - input_model1.meta.instrument._instance.update(mirifushort_short) - input_model1.meta.observation._instance.update(observation) - input_model1.meta.subarray._instance.update(subarray) - input_model1.meta.cal_step.assign_wcs = 'COMPLETE' - input_model1.meta.filename = 'test1.fits' - - input_model2 = datamodels.IFUImageModel() - input_model2.meta.wcsinfo._instance.update(wcsinfo) - input_model2.meta.instrument._instance.update(mirifushort_short) - input_model2.meta.observation._instance.update(observation) - input_model2.meta.subarray._instance.update(subarray) - input_model2.meta.cal_step.assign_wcs = 'COMPLETE' - input_model2.meta.filename = 'test2.fits' - - input_models = [] - input_models.append(input_model1) - input_models.append(input_model2) - return input_models - - @pytest.fixture(scope='function') def miri_full_coverage(): """ Generate a IFU images SHORT, LONG for all three bands """ @@ -539,41 +468,3 @@ def test_calspec3_config_nirspec_multi(tmp_cwd, nirspec_medium_coverage): assert cube_pars['1']['par1'] == ['g140m', 'g235m'] assert cube_pars['1']['par2'] == ['f100lp', 'f170lp'] - - -def test_offset_file_config(tmp_cwd, miri_ifushort_short_2files, offset_file): - """ Test validation of the offset configuration""" - - # first test that it is a valid asdf file and has what is needed - step = CubeBuildStep() - step.input_models = miri_ifushort_short_2files - - step.offset_file = offset_file - offsets = step.check_offset_file() - assert isinstance(offsets, dict) - -def test2_offset_file_config(tmp_cwd, miri_ifushort_short_2files, offset_file): - """ Test validation of the offset 
configuration""" - - # Test changing one of the filenames so it is not in the list given - # in the offset_file - step = CubeBuildStep() - step.input_models = miri_ifushort_short_2files - - miri_ifushort_short_2files[0].meta.filename = 'test3.fits' - step.offset_file = offset_file - offsets = step.check_offset_file() - assert offsets is None - - -def test_offset_file_config2(tmp_cwd, miri_ifushort_short_2files, offset_file_arcmin): - """ Test validation of the offset configuration""" - - # test is the if the user set the units to arcmins - step = CubeBuildStep() - step.input_models = miri_ifushort_short_2files - - step.offset_file = offset_file_arcmin - offsets = step.check_offset_file() - assert offsets is None - diff --git a/jwst/cube_build/tests/test_offset.py b/jwst/cube_build/tests/test_offset.py new file mode 100644 index 0000000000..275764bba2 --- /dev/null +++ b/jwst/cube_build/tests/test_offset.py @@ -0,0 +1,290 @@ +""" +Unit test for Cube Build testing setting up configuration +""" + +import pytest +import sys +import math +import asdf +from stdatamodels.jwst import datamodels +import astropy.units as u +from gwcs import WCS +import numpy as np +from jwst.cube_build import CubeBuildStep +from jwst.cube_build import cube_build +from jwst.cube_build import ifu_cube +from jwst.cube_build import file_table +from jwst.cube_build import instrument_defaults +from jwst import assign_wcs + + +@pytest.fixture(scope='module') +def offset_file(tmp_path_factory): + """ Generate a offset file """ + + filename = tmp_path_factory.mktemp('offset') + filename = filename / 'offset.asdf' + + testfile = ['test1.fits', 'test2.fits'] + raoffset = [0.0, 0.1] + decoffset = [0.0, 0.15] + tree = { + "units": str(u.arcsec), + "filename": testfile, + "raoffset": raoffset, + "decoffset": decoffset + } + af = asdf.AsdfFile(tree) + af.write_to(filename) + af.close() + return filename + + +@pytest.fixture(scope='module') +def offset_file_arcmin(tmp_path_factory): + """ Generate a offset file with units = arcmin """ + + filename = tmp_path_factory.mktemp('offset') + filename = filename / 'offset_arcmin.asdf' + + testfile = ['test1.fits', 'test2.fits'] + raoffset = [0.0, 0.1] + decoffset = [0.0, 0.15] + tree = { + "units": str(u.arcmin), + "filename": testfile, + "raoffset": raoffset, + "decoffset": decoffset + } + af = asdf.AsdfFile(tree) + af.write_to(filename) + return filename + +@pytest.fixture(scope='function') +def miri_ifushort_short_2files(): + """ Generate input model with 2 IFU images """ + + observation = { + 'date': '2019-01-01', + 'time': '17:00:00'} + + subarray = { + 'fastaxis': 1, + 'name': 'FULL', + 'slowaxis': 2, + 'xsize': 1032, + 'xstart': 1, + 'ysize': 1024, + 'ystart': 1 + } + + wcsinfo = { + 'dec_ref': 39.05036271706514, + 'ra_ref': 339.8149235604264 , + 'roll_ref': 217.25027556008598 , + 'v2_ref': -503.378, + 'v3_ref': -318.9992, + 'v3yangle': 0.0, + 'vparity': -1, + 's_region': 'POLYGON ICRS 339.813915797 39.049575409 339.816080118 39.049575409 339.816080118 39.051260090 339.813915797 39.051260090', + 'spectral_region': ([4.889451133245338, 8.781164838427532]) + } + + mirifushort_short = { + 'detector': 'MIRIFUSHORT', + 'channel': '12', + 'band': 'SHORT', + 'name': 'MIRI' + } + + input_model1 = datamodels.IFUImageModel() + input_model1.meta.exposure.type = 'MIR_MRS' + input_model1.meta.wcsinfo._instance.update(wcsinfo) + input_model1.meta.instrument._instance.update(mirifushort_short) + input_model1.meta.observation._instance.update(observation) + 
input_model1.meta.subarray._instance.update(subarray) + input_model1.meta.cal_step.assign_wcs = 'COMPLETE' + input_model1.meta.filename = 'test1.fits' + input_model1.data = np.random.random((1024, 1032)) + + input_model2 = datamodels.IFUImageModel() + input_model2.meta.exposure.type = 'MIR_MRS' + input_model2.meta.wcsinfo._instance.update(wcsinfo) + input_model2.meta.instrument._instance.update(mirifushort_short) + input_model2.meta.observation._instance.update(observation) + input_model2.meta.subarray._instance.update(subarray) + input_model2.meta.cal_step.assign_wcs = 'COMPLETE' + input_model2.meta.filename = 'test2.fits' + input_model2.data = np.random.random((1024, 1032)) + + input_models = [] + + step = assign_wcs.assign_wcs_step.AssignWcsStep() + refs = {} + for reftype in assign_wcs.assign_wcs_step.AssignWcsStep.reference_file_types: + refs[reftype] = step.get_reference_file(input_model1, reftype) + pipe = assign_wcs.miri.create_pipeline(input_model1, refs) + input_model1.meta.wcs = WCS(pipe) + + for reftype in assign_wcs.assign_wcs_step.AssignWcsStep.reference_file_types: + refs[reftype] = step.get_reference_file(input_model2, reftype) + pipe = assign_wcs.miri.create_pipeline(input_model2, refs) + input_model2.meta.wcs = WCS(pipe) + + input_models.append(input_model1) + input_models.append(input_model2) + return input_models + + +def test_offset_file_config(tmp_cwd, miri_ifushort_short_2files, offset_file): + """ Test validation of the offset configuration""" + + # first test that it is a valid asdf file and has what is needed + step = CubeBuildStep() + step.input_models = miri_ifushort_short_2files + + step.offset_file = offset_file + offsets = step.check_offset_file() + assert isinstance(offsets, dict) + + +def test2_offset_file_config(tmp_cwd, miri_ifushort_short_2files, offset_file): + """ Test validation of the offset configuration""" + + # Test changing one of the filenames so it is not in the list given + # in the offset_file + step = CubeBuildStep() + step.input_models = miri_ifushort_short_2files + + miri_ifushort_short_2files[0].meta.filename = 'test3.fits' + step.offset_file = offset_file + offsets = step.check_offset_file() + assert offsets is None + + +def test_offset_file_units(tmp_cwd, miri_ifushort_short_2files, offset_file_arcmin): + """ Test offsets are not used when units are arc minutes""" + + # test is the if the user set the units to arcmins + step = CubeBuildStep() + step.input_models = miri_ifushort_short_2files + + step.offset_file = offset_file_arcmin + offsets = step.check_offset_file() + assert offsets is None + + +def test_read_offset_file(miri_ifushort_short_2files, offset_file): + """ Test offset file has been read in correctly""" + + step = CubeBuildStep() + step.input_models = miri_ifushort_short_2files + step.offset_file = offset_file + offsets = step.check_offset_file() + # Test that the offset file is read in and is a dictionary + assert isinstance(offsets, dict) + + pars_input = {} + pars_input['channel'] = [] + pars_input['subchannel'] = [] + pars_input['filter'] = [] + pars_input['grating'] = [] + weighting = 'drizzle' + output_type = 'multi' + single = False + par_filename = 'None' + + # set up pars needed for CubeData class + pars = { + 'channel': pars_input['channel'], + 'subchannel': pars_input['subchannel'], + 'grating': pars_input['grating'], + 'filter': pars_input['filter'], + 'weighting': weighting, + 'single': single, + 'output_type': output_type, + 'offset_file': offset_file} + + cubeinfo = cube_build.CubeData( + 
miri_ifushort_short_2files,
+        par_filename,
+        **pars)
+
+    master_table = file_table.FileTable()
+    this_instrument = master_table.set_file_table(
+        cubeinfo.input_models)
+
+    cubeinfo.instrument = this_instrument
+    cubeinfo.determine_band_coverage(master_table)
+    num_cubes, cube_pars = cubeinfo.number_cubes()
+    # test with output_type = multi we get 1 cube
+    # test that cube info sets up the correct channels and band for data
+    assert num_cubes == 1
+    assert cube_pars['1']['par1'] == ['1', '2']
+    assert cube_pars['1']['par2'] == ['short', 'short']
+
+    wave_min = 4.88
+    wave_max = 8.78
+
+    # set up pars for the IFUCubeData class
+    pars_cube = {
+        'scalexy': 0.0,
+        'scalew': 0.0,
+        'interpolation': 'drizzle',
+        'weighting': 'drizzle',
+        'weight_power': None,
+        'coord_system': 'skyalign',
+        'ra_center': None,
+        'dec_center': None,
+        'cube_pa': None,
+        'nspax_x': None,
+        'nspax_y': None,
+        'rois': None,
+        'roiw': None,
+        'wavemin': wave_min,
+        'wavemax': wave_max,
+        'skip_dqflagging': False,
+        'offsets': offsets,
+        'debug_spaxel': '0 0 0'}
+
+    pipeline = 3
+    list_par1 = ['1', '2']
+    list_par2 = ['short', 'short']
+    output_name_base = 'TEMP'
+
+    instrument_info = instrument_defaults.InstrumentInfo()
+
+    thiscube = ifu_cube.IFUCubeData(
+        pipeline,
+        miri_ifushort_short_2files,
+        output_name_base,
+        output_type,
+        this_instrument,
+        list_par1,
+        list_par2,
+        instrument_info,
+        master_table,
+        **pars_cube)
+
+    thiscube.linear_wavelength = True
+    thiscube.spatial_size = 0.13
+    thiscube.spectral_size = 0.001
+    thiscube.setup_ifucube_wcs()
+
+    # test the offset file was read in correctly
+    filename = ['test1.fits', 'test2.fits']
+    raoffset = [0.0, 0.1]
+    decoffset = [0.0, 0.15]
+
+    ravalues = thiscube.offsets['raoffset']
+    decvalues = thiscube.offsets['decoffset']
+
+    assert thiscube.offsets['filename'] == filename
+
+    assert math.isclose(ravalues[0]*3600.0, raoffset[0], abs_tol=0.0001)
+    assert math.isclose(ravalues[1]*3600.0, raoffset[1], abs_tol=0.0001)
+    assert math.isclose(decvalues[0]*3600.0, decoffset[0], abs_tol=0.0001)
+    assert math.isclose(decvalues[1]*3600.0, decoffset[1], abs_tol=0.0001)
+
+

From 9c0aaca6503351453d03e7ba0e69f3ff080aa0d9 Mon Sep 17 00:00:00 2001
From: jemorrison
Date: Tue, 17 Sep 2024 14:03:22 -0700
Subject: [PATCH 27/39] fix doc error

---
 docs/jwst/cube_build/arguments.rst | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/docs/jwst/cube_build/arguments.rst b/docs/jwst/cube_build/arguments.rst
index 74a1f72e46..ca749d5d21 100644
--- a/docs/jwst/cube_build/arguments.rst
+++ b/docs/jwst/cube_build/arguments.rst
@@ -145,6 +145,7 @@ are required to be in the offset file and only the unit, `arcsec`, is allowed. T
 not contain the directory path. The offset file can have any name, but it must have the `asdf` extension.
 
 An example of making an offset file for an association containing three files is:
+
 .. code-block:: python
 
     import asdf
@@ -167,10 +168,10 @@ An example of making an offset file for an association containing three files is:
 
 
-An example of making an offset file for `num` files
-where the user has set up list called `file` containing the `num` filenames.
-The cooresponding Ra and Dec offsets, both containing num values, are stored in lists called,
-`ra_offset` and `dec_offset`
+Below is an example of making an offset file for `num` files.
+The user has set up list called `file` containing the `num` filenames and the
+cooresponding Ra and Dec offsets lists, both containing num values. 
In this example +these list are called `ra_offset` and `dec_offset` .. code-block:: python @@ -202,9 +203,9 @@ Set up the lists and call the above function: .. code-block:: python - files = ['test1.fits', 'test2.fits', 'test3.fits'] - ra_offset = [0.1, 0.12, 0.13] - dec_offset = [0.14, 0.15, 0.16] + files = ['test1.fits', 'test2.fits', 'test3.fits', 'test4,fits', 'test5.fits'] + ra_offset = [0.1, 0.12, 0.13, 0.11, 0.12] + dec_offset = [0.14, 0.15, 0.16, 0.01, 0.1] create_offset_asdf(files, ra_offset, dec_offset) From d5c6c7d86e150e21935747cbdbfd1dd94f864569 Mon Sep 17 00:00:00 2001 From: jemorrison Date: Tue, 17 Sep 2024 15:10:22 -0700 Subject: [PATCH 28/39] update comments and doc --- docs/jwst/cube_build/arguments.rst | 1 - jwst/cube_build/ifu_cube.py | 29 +++++++++++++++-------------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/docs/jwst/cube_build/arguments.rst b/docs/jwst/cube_build/arguments.rst index ca749d5d21..90b3901bc8 100644 --- a/docs/jwst/cube_build/arguments.rst +++ b/docs/jwst/cube_build/arguments.rst @@ -167,7 +167,6 @@ An example of making an offset file for an association containing three files is - Below is an example of making an offset file for `num` files. The user has set up the `file` list containing the `num` filenames and the cooresponding Ra and Dec offsets lists, both containing num values. In this example diff --git a/jwst/cube_build/ifu_cube.py b/jwst/cube_build/ifu_cube.py index e325424407..54270a7481 100644 --- a/jwst/cube_build/ifu_cube.py +++ b/jwst/cube_build/ifu_cube.py @@ -1316,16 +1316,16 @@ def setup_ifucube_wcs(self): log.debug('number of files %d', n) # find the median center declination if we have an offset file - if self.offsets is not None: - for k in range(n): - input_file = self.master_table.FileMap[self.instrument][this_a][this_b][k] - input_model = datamodels.open(input_file) - spatial_box = input_model.meta.wcsinfo.s_region - s = spatial_box.split(' ') - cb1 = float(s[4]) - cb2 = float(s[6]) - cb3 = float(s[8]) - cb4 = float(s[10]) + #if self.offsets is not None: + # for k in range(n): + # input_file = self.master_table.FileMap[self.instrument][this_a][this_b][k] + # input_model = datamodels.open(input_file) + # spatial_box = input_model.meta.wcsinfo.s_region + # s = spatial_box.split(' ') + # cb1 = float(s[4]) + # cb2 = float(s[6]) + # cb3 = float(s[8]) + # cb4 = float(s[10]) for k in range(n): lmin = 0.0 @@ -1824,6 +1824,7 @@ def map_miri_pixel_to_sky(self, input_model, this_par1, subtract_background, # if self.coord_system == 'skyalign' or self.coord_system == 'ifualign': ra, dec, wave = input_model.meta.wcs(x, y) + # offset the central pixel if offsets is not None: c1 = SkyCoord(ra, dec, unit='deg') c1_new = c1.spherical_offsets_by(raoffset, decoffset) @@ -1869,6 +1870,7 @@ def map_miri_pixel_to_sky(self, input_model, this_par1, subtract_background, input_model.meta.wcs.output_frame, alpha2, beta - dbeta * pixfrac / 2., wave) + # now offset the pixel corners if offsets is not None: c1 = SkyCoord(ra1, dec1, unit='deg') c2 = SkyCoord(ra2, dec2, unit='deg') @@ -1891,7 +1893,6 @@ def map_miri_pixel_to_sky(self, input_model, this_par1, subtract_background, ra4 = c4_new.ra.value dec4 = c4_new.dec.value - corner_coord = [ra1, dec1, ra2, dec2, ra3, dec3, ra4, dec4] sky_result = (x, y, ra, dec, wave, slice_no, dwave, corner_coord) @@ -2081,7 +2082,6 @@ def map_nirspec_pixel_to_sky(self, input_model, offsets): slice_no = slice_det[valid_data] dwave = dwave_det[valid_data] - ra = ra_det[valid_data] dec = dec_det[valid_data] ra1 
= ra1_det[valid_data] @@ -2093,13 +2093,14 @@ def map_nirspec_pixel_to_sky(self, input_model, offsets): dec3 = dec3_det[valid_data] dec4 = dec4_det[valid_data] - if offsets is not None: + if offsets is not None: + # central pixel c1 = SkyCoord(ra, dec, unit='deg') - c1_new = c1.spherical_offsets_by(raoffset, decoffset) ra = c1_new.ra.value dec = c1_new.dec.value + # pixel corners c1 = SkyCoord(ra1, dec1, unit='deg') c2 = SkyCoord(ra2, dec2, unit='deg') c3 = SkyCoord(ra3, dec3, unit='deg') From 78f702c3fde3143df7b9c87beff7c4755e015d61 Mon Sep 17 00:00:00 2001 From: jemorrison Date: Tue, 17 Sep 2024 15:32:00 -0700 Subject: [PATCH 29/39] update docs and update log message --- docs/jwst/cube_build/arguments.rst | 8 ++++---- jwst/cube_build/ifu_cube.py | 7 ++++--- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/docs/jwst/cube_build/arguments.rst b/docs/jwst/cube_build/arguments.rst index 90b3901bc8..479a0253f6 100644 --- a/docs/jwst/cube_build/arguments.rst +++ b/docs/jwst/cube_build/arguments.rst @@ -168,10 +168,10 @@ An example of making an offset file for an association containing three files is Below is an example of making an offset file for `num` files. -The user has set up the `file` list containing the `num` filenames and the -cooresponding Ra and Dec offsets lists, both containing num values. In this example -these list are called `ra_offset` and `dec_offset` - +The user has set up three lists `file`, `ra_offset` and `dec_offset`. The `file` list +contains the filenames and the `ra_offset` and `dec_offset` contain the Ra and Dec offsets respectively. +In this example, all the list have five values. The units of the of Ra and Dec offsets are given in +the `units` value. .. code-block:: python diff --git a/jwst/cube_build/ifu_cube.py b/jwst/cube_build/ifu_cube.py index 54270a7481..d2e73e44cf 100644 --- a/jwst/cube_build/ifu_cube.py +++ b/jwst/cube_build/ifu_cube.py @@ -1783,7 +1783,7 @@ def map_miri_pixel_to_sky(self, input_model, this_par1, subtract_background, index = offsets['filename'].index(filename) raoffset = offsets['raoffset'][index] decoffset = offsets['decoffset'][index] - log.info("Ra and dec offset (arc seconds) applied to file :%8.6f, %8.6f, %s", + log.info("Ra and Dec offset (arc seconds) applied to file :%8.6f, %8.6f, %s", raoffset*3600.0, decoffset*3600.0, filename) raoffset = raoffset* units.deg @@ -1926,8 +1926,9 @@ def map_nirspec_pixel_to_sky(self, input_model, offsets): index = offsets['filename'].index(filename) raoffset = offsets['raoffset'][index] decoffset = offsets['decoffset'][index] - log.info("Ra and dec offset (arc seconds) applied to file :%5.2f, %5.2f, %s", - raoffset, decoffset, filename) + log.info("Ra and Dec offset (arc seconds) applied to file :%8.6f, %8.6f, %s", + raoffset*3600.0, decoffset*3600.0, filename) + raoffset = raoffset* units.deg decoffset = decoffset* units.deg # initialize the ra,dec, and wavelength arrays From 32b4044a4968ebe59521ee3c8e825c16e6f15d89 Mon Sep 17 00:00:00 2001 From: jemorrison Date: Tue, 17 Sep 2024 15:48:42 -0700 Subject: [PATCH 30/39] docs update --- docs/jwst/cube_build/arguments.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/jwst/cube_build/arguments.rst b/docs/jwst/cube_build/arguments.rst index 479a0253f6..717095d38d 100644 --- a/docs/jwst/cube_build/arguments.rst +++ b/docs/jwst/cube_build/arguments.rst @@ -140,7 +140,7 @@ corresponding right ascension and declination offset given in arc seconds. Below is an example of how to make an ASDF offset file. 
It is assumed the user has determined the
 offsets to apply to the data in each file. The offsets information is stored in three lists:
-`filenames`, `raoffset` and `decoffset`. The units of the Ra and Dec offsets
+`filename`, `raoffset` and `decoffset`. The units of the Ra and Dec offsets
 are required to be in the offset file and only the unit, `arcsec`, is allowed. The file names should
 not contain the directory path. The offset file can have any name, but it must have the `asdf` extension.
@@ -171,7 +171,7 @@
 
 Below is an example of making an offset file for `num` files.
 The user has set up three lists `file`, `ra_offset` and `dec_offset`. The `file` list
 contains the filenames and the `ra_offset` and `dec_offset` contain the Ra and Dec offsets respectively.
 In this example, all the list have five values. The units of the of Ra and Dec offsets are given in
-the `units` value.
+the `units` value and this value must be arc seconds.

From 4e665cb885c5e28ce47a371fb888dae030981823 Mon Sep 17 00:00:00 2001
From: jemorrison
Date: Wed, 18 Sep 2024 07:38:50 -0700
Subject: [PATCH 31/39] removed opening and closing of models

---
 jwst/cube_build/ifu_cube.py | 22 ++--------------------
 1 file changed, 2 insertions(+), 20 deletions(-)

diff --git a/jwst/cube_build/ifu_cube.py b/jwst/cube_build/ifu_cube.py
index d2e73e44cf..5d3802d1ae 100644
--- a/jwst/cube_build/ifu_cube.py
+++ b/jwst/cube_build/ifu_cube.py
@@ -600,12 +600,10 @@ def build_ifucube(self):
         for ib in range(number_bands):
             this_par1 = self.list_par1[ib]
             this_par2 = self.list_par2[ib]
-            for input in self.master_table.FileMap[self.instrument][this_par1][this_par2]:
+            for input_model in self.master_table.FileMap[self.instrument][this_par1][this_par2]:
 # ________________________________________________________________________________
 # loop over the files that cover the spectral range the cube is for
-                #input_model = datamodels.open(input)
-                input_model = input
                 self.input_models_this_cube.append(input_model.copy())
                 # set up input_model to be first file used to copy in basic header info
                 # to ifucube meta data
@@ -777,8 +775,6 @@ def build_ifucube(self):
                 result = None
                 del spaxel_flux, spaxel_weight, spaxel_var, spaxel_iflux, result
                 k = k + 1
-            input_model.close()
-            del input_model
 # _______________________________________________________________________
 # done looping over files
@@ -1315,24 +1311,11 @@ def setup_ifucube_wcs(self):
         n = len(self.master_table.FileMap[self.instrument][this_a][this_b])
         log.debug('number of files %d', n)
 
-        # find the median center declination if we have an offset file
-        #if self.offsets is not None:
-        #    for k in range(n):
-        #        input_file = self.master_table.FileMap[self.instrument][this_a][this_b][k]
-        #        input_model = datamodels.open(input_file)
-        #        spatial_box = input_model.meta.wcsinfo.s_region
-        #        s = spatial_box.split(' ')
-        #        cb1 = float(s[4])
-        #        cb2 = float(s[6])
-        #        cb3 = float(s[8])
-        #        cb4 = float(s[10])
         for k in range(n):
             lmin = 0.0
             lmax = 0.0
+            input_model = self.master_table.FileMap[self.instrument][this_a][this_b][k]
 
-            input_file = self.master_table.FileMap[self.instrument][this_a][this_b][k]
-            input_model = datamodels.open(input_file)
 # ________________________________________________________________________________
 # If offsets are provided. Pull in ra and dec offsets.
             raoffset = 0.0
@@ -1434,7 +1417,6 @@ def setup_ifucube_wcs(self):
 
         lambda_min.append(lmin)
         lambda_max.append(lmax)
-            input_model.close()
 # ________________________________________________________________________________
 # done looping over files determine final size of cube
         corner_a = np.array(corner_a)

From 299b68caa1a180a9a99efeaf25d3719d878cabc4 Mon Sep 17 00:00:00 2001
From: jemorrison
Date: Wed, 18 Sep 2024 16:14:00 -0700
Subject: [PATCH 32/39] various updates from review

---
 docs/jwst/cube_build/arguments.rst    | 41 --------
 jwst/cube_build/cube_build_step.py    | 34 +++----
 jwst/cube_build/ifu_cube.py           | 138 +++++++-----------------
 jwst/cube_build/ifuoffset.schema.yaml |  5 +-
 jwst/cube_build/tests/test_offset.py  | 20 ++--
 5 files changed, 69 insertions(+), 169 deletions(-)

diff --git a/docs/jwst/cube_build/arguments.rst b/docs/jwst/cube_build/arguments.rst
index 717095d38d..8d2bfa3053 100644
--- a/docs/jwst/cube_build/arguments.rst
+++ b/docs/jwst/cube_build/arguments.rst
@@ -167,44 +167,3 @@ An example of making an offset file for an association containing three files is:
 
 
 
-Below is an example of making an offset file for `num` files.
-The user has set up three lists `file`, `ra_offset` and `dec_offset`. The `file` list
-contains the filenames and the `ra_offset` and `dec_offset` contain the Ra and Dec offsets respectively.
-In this example, all the list have five values. The units of the of Ra and Dec offsets are given in
-the `units` value and this value must be arc seconds.
-
-.. code-block:: python
-
-    import asdf
-    import astropy.units as u
-    def create_offset_asdf(files, ra_offset, dec_offset):
-
-        filename = []
-        raoffset = []
-        decoffset = []
-        num = len(files)
-        for i in range(num):
-            filename.append(files[i])
-            raoffset.append(ra_offset[i])
-            decoffset.append(dec_offset[i])
-
-        tree = {
-            "units": str(u.arcsec),
-            "filename": filename,
-            "raoffset": raoffset,
-            "decoffset": decoffset
-        }
-        af = asdf.AsdfFile(tree)
-        af.write_to( 'offsets.asdf')
-
-
-Set up the lists and call the above function:
-
-.. code-block:: python
-
-    files = ['test1.fits', 'test2.fits', 'test3.fits', 'test4,fits', 'test5.fits']
-    ra_offset = [0.1, 0.12, 0.13, 0.11, 0.12]
-    dec_offset = [0.14, 0.15, 0.16, 0.01, 0.1]
-    create_offset_asdf(files, ra_offset, dec_offset)
-
-
diff --git a/jwst/cube_build/cube_build_step.py b/jwst/cube_build/cube_build_step.py
index 3341b6ba18..30da89f019 100755
--- a/jwst/cube_build/cube_build_step.py
+++ b/jwst/cube_build/cube_build_step.py
@@ -11,7 +11,7 @@
 from ..assign_wcs.util import update_s_region_keyword
 from ..stpipe import Step, record_step_status
 from pathlib import Path
-
+from astropy import units
 
 __all__ = ["CubeBuildStep"]
 
@@ -564,51 +564,41 @@ def check_offset_file(self):
         try:
             af = asdf.open(self.offset_file, custom_schema=DATA_PATH/'ifuoffset.schema.yaml')
         except:
-            self.log.error('Validation Error for offset file')
-            self.log.error('Turning off adjusting by offsets')
-            return None
+            schema_message = ('Validation Error for offset file. Fix the offset file. \n' +
+                              'The offset file needs to have the same number of elements in the filename, raoffset and decoffset lists.\n' +
+                              'The units need to be provided and only arcsec is allowed.')
+            raise Exception(schema_message)
 
         offset_filename = af['filename']
         offset_ra = af['raoffset']
         offset_dec = af['decoffset']
-        offset_unit = af['units']
 
-        if offset_unit != 'arcsec':
-            self.log.error('Provide the offset units in units of arcsec ')
-            self.log.error('Turning off adjusting by offsets ')
-            af.close()
-            return None
-
+        # check that all the file names in input_model are in the offset filename
        for model in self.input_models:
             file_check = model.meta.filename
             if file_check in offset_filename:
                 continue
             else:
-                self.log.error('File in assocation is not found in offset list list %s', file_check)
-                self.log.error('Turning off adjusting by offsets')
                 af.close()
-                return None
+                raise Exception(f'Error in offset file. A file in the association was not found in the offset list: {file_check}')
+
         # check that all the lists have the same length
         len_file = len(offset_filename)
         len_ra = len(offset_ra)
         len_dec = len(offset_dec)
         if (len_file != len_ra or len_ra != len_dec or len_file != len_dec):
-            self.log.error('The offset file does not have the same number of values for filename, offset_ra, offset_dec')
-            self.log.error('Turning off adjusting by offsets')
             af.close()
-            return None
+            raise Exception('Offset file error. The offset file does not have the same number of values for filename, raoffset and decoffset')
+
+        offset_ra = offset_ra * units.arcsec
+        offset_dec = offset_dec * units.arcsec
 
         # The offset file has passed tests so set the offset dictionary
         offsets = {}
         offsets['filename'] = offset_filename
         offsets['raoffset'] = offset_ra
         offsets['decoffset'] = offset_dec
-        n = len(offsets['raoffset'])
-        # convert to degrees
-        for i in range(n):
-            offsets['raoffset'][i] = offsets['raoffset'][i]/3600.0
-            offsets['decoffset'][i] = offsets['decoffset'][i]/3600.0
+
         af.close()
         return offsets
diff --git a/jwst/cube_build/ifu_cube.py b/jwst/cube_build/ifu_cube.py
index 5d3802d1ae..c94b41fa69 100644
--- a/jwst/cube_build/ifu_cube.py
+++ b/jwst/cube_build/ifu_cube.py
@@ -8,14 +8,13 @@
 import math
 
 from astropy.stats import circmean
-from astropy import units as u
 from gwcs import wcstools
 from stdatamodels.jwst import datamodels
 from stdatamodels.jwst.datamodels import dqflags
 from stdatamodels.jwst.transforms.models import _toindex
-from astropy import units
 from astropy.coordinates import SkyCoord
+from astropy import units as u
 
 from ..model_blender import blendmeta
 from ..assign_wcs import pointing
@@ -116,7 +115,6 @@ def __init__(self,
         self.naxis3 = None
         self.cdelt3_normal = None
         self.rot_angle = None  # rotation angle between Ra-Dec and IFU local instrument plane
-        self.median_dec = None
 
         self.a_min = 0
         self.a_max = 0
@@ -1322,10 +1320,7 @@ def setup_ifucube_wcs(self):
             decoffset = 0.0
             # pull out ra dec offset if it exists
             if self.offsets is not None:
-                filename = input_model.meta.filename
-                index = self.offsets['filename'].index(filename)
-                raoffset = self.offsets['raoffset'][index]
-                decoffset = self.offsets['decoffset'][index]
+                raoffset, decoffset = self.find_ra_dec_offset(input_model.meta.filename)
 # ________________________________________________________________________________
 # Find the footprint of the image
             spectral_found = hasattr(input_model.meta.wcsinfo, 'spectral_region')
@@ -1383,28 +1378,11 @@ def setup_ifucube_wcs(self):
 
                 # now append this model spatial and spectral corner
                 if self.offsets is not None:
-                    c1 = SkyCoord(ca1, 
cb1, unit='deg') - c2 = SkyCoord(ca2, cb2, unit='deg') - c3 = SkyCoord(ca3, cb3, unit='deg') - c4 = SkyCoord(ca4, cb4, unit='deg') - raoffset = raoffset* units.deg - decoffset = decoffset* units.deg - - c1_new = c1.spherical_offsets_by(raoffset, decoffset) - c2_new = c2.spherical_offsets_by(raoffset, decoffset) - c3_new = c3.spherical_offsets_by(raoffset, decoffset) - c4_new = c4.spherical_offsets_by(raoffset, decoffset) - ca1 = c1_new.ra.value - cb1 = c1_new.dec.value - - ca2 = c2_new.ra.value - cb2 = c2_new.dec.value - - ca3 = c3_new.ra.value - cb3 = c3_new.dec.value - - ca4 = c4_new.ra.value - cb4 = c4_new.dec.value + ca1, cb1 = self.offset_coord(ca1, cb1, raoffset, decoffset) + ca2, cb2 = self.offset_coord(ca2, cb2, raoffset, decoffset) + ca3, cb3 = self.offset_coord(ca3, cb3, raoffset, decoffset) + ca4, cb4 = self.offset_coord(ca4, cb4, raoffset, decoffset) + corner_a.append(ca1) corner_a.append(ca2) corner_a.append(ca3) @@ -1761,15 +1739,11 @@ def map_miri_pixel_to_sky(self, input_model, this_par1, subtract_background, decoffset = 0.0 # pull out ra dec offset if it exists if offsets is not None: - filename = input_model.meta.filename - index = offsets['filename'].index(filename) - raoffset = offsets['raoffset'][index] - decoffset = offsets['decoffset'][index] + raoffset, decoffset = self.find_ra_dec_offset(input_model.meta.filename) log.info("Ra and Dec offset (arc seconds) applied to file :%8.6f, %8.6f, %s", - raoffset*3600.0, - decoffset*3600.0, filename) - raoffset = raoffset* units.deg - decoffset = decoffset* units.deg + raoffset.value, + decoffset.value, input_model.meta.filename) + # check if background sky matching as been done in mrs_imatch step # If it has not been subtracted and the background has not been # subtracted - subtract it. 
@@ -1808,10 +1782,7 @@ def map_miri_pixel_to_sky(self, input_model, this_par1, subtract_background,

         # offset the central pixel
         if offsets is not None:
-            c1 = SkyCoord(ra, dec, unit='deg')
-            c1_new = c1.spherical_offsets_by(raoffset, decoffset)
-            ra = c1_new.ra.value
-            dec = c1_new.dec.value
+            ra, dec = self.offset_coord(ra, dec, raoffset, decoffset)

         valid1 = ~np.isnan(ra)
         ra = ra[valid1]

@@ -1854,27 +1825,11 @@ def map_miri_pixel_to_sky(self, input_model, this_par1, subtract_background,

         # now offset the pixel corners
         if offsets is not None:
-            c1 = SkyCoord(ra1, dec1, unit='deg')
-            c2 = SkyCoord(ra2, dec2, unit='deg')
-            c3 = SkyCoord(ra3, dec3, unit='deg')
-            c4 = SkyCoord(ra4, dec4, unit='deg')
-
-            c1_new = c1.spherical_offsets_by(raoffset, decoffset)
-            c2_new = c2.spherical_offsets_by(raoffset, decoffset)
-            c3_new = c3.spherical_offsets_by(raoffset, decoffset)
-            c4_new = c4.spherical_offsets_by(raoffset, decoffset)
-            ra1 = c1_new.ra.value
-            dec1 = c1_new.dec.value
-
-            ra2 = c2_new.ra.value
-            dec2 = c2_new.dec.value
-
-            ra3 = c3_new.ra.value
-            dec3 = c3_new.dec.value
-
-            ra4 = c4_new.ra.value
-            dec4 = c4_new.dec.value
-
+            ra1, dec1 = self.offset_coord(ra1, dec1, raoffset, decoffset)
+            ra2, dec2 = self.offset_coord(ra2, dec2, raoffset, decoffset)
+            ra3, dec3 = self.offset_coord(ra3, dec3, raoffset, decoffset)
+            ra4, dec4 = self.offset_coord(ra4, dec4, raoffset, decoffset)
+
         corner_coord = [ra1, dec1, ra2, dec2, ra3, dec3, ra4, dec4]
         sky_result = (x, y, ra, dec, wave, slice_no, dwave, corner_coord)

@@ -1904,15 +1859,10 @@ def map_nirspec_pixel_to_sky(self, input_model, offsets):
         decoffset = 0.0
         # pull out ra dec offset if it exists
         if offsets is not None:
-            filename = input_model.meta.filename
-            index = offsets['filename'].index(filename)
-            raoffset = offsets['raoffset'][index]
-            decoffset = offsets['decoffset'][index]
+            raoffset, decoffset = self.find_ra_dec_offset(input_model.meta.filename)
             log.info("Ra and Dec offset (arc seconds) applied to file :%8.6f, %8.6f, %s",
-                     raoffset*3600.0, decoffset*3600.0, filename)
+                     raoffset.value, decoffset.value, input_model.meta.filename)
-        raoffset = raoffset* units.deg
-        decoffset = decoffset* units.deg
+
         # initialize the ra,dec, and wavelength arrays
         # we will loop over slice_nos and fill in values
         # the flag_det will be set when a slice_no pixel is filled in

@@ -2078,32 +2028,14 @@ def map_nirspec_pixel_to_sky(self, input_model, offsets):

         if offsets is not None:
             # central pixel
-            c1 = SkyCoord(ra, dec, unit='deg')
-            c1_new = c1.spherical_offsets_by(raoffset, decoffset)
-            ra = c1_new.ra.value
-            dec = c1_new.dec.value
+            ra, dec = self.offset_coord(ra, dec, raoffset, decoffset)

             # pixel corners
-            c1 = SkyCoord(ra1, dec1, unit='deg')
-            c2 = SkyCoord(ra2, dec2, unit='deg')
-            c3 = SkyCoord(ra3, dec3, unit='deg')
-            c4 = SkyCoord(ra4, dec4, unit='deg')
-            c1_new = c1.spherical_offsets_by(raoffset, decoffset)
-            c2_new = c2.spherical_offsets_by(raoffset, decoffset)
-            c3_new = c3.spherical_offsets_by(raoffset, decoffset)
-            c4_new = c4.spherical_offsets_by(raoffset, decoffset)
-            ra1 = c1_new.ra.value
-            dec1 = c1_new.dec.value
-
-            ra2= c2_new.ra.value
-            dec2 = c2_new.dec.value
-
-            ra3 = c3_new.ra.value
-            dec3 = c3_new.dec.value
-
-            ra4= c4_new.ra.value
-            dec4 = c4_new.dec.value
-
+            ra1, dec1 = self.offset_coord(ra1, dec1, raoffset, decoffset)
+            ra2, dec2 = self.offset_coord(ra2, dec2, raoffset, decoffset)
+            ra3, dec3 = self.offset_coord(ra3, dec3, raoffset, decoffset)
+            ra4, dec4 = self.offset_coord(ra4, dec4, raoffset, decoffset)
+
         corner_coord = [ra1, dec1, ra2, dec2, ra3, dec3, ra4, dec4]
         sky_result = (x, y, ra, dec, wave, slice_no, dwave, corner_coord)
         return sky_result

@@ -2504,7 +2436,27 @@ def blend_output_metadata(self, IFUCube):
             ],
         )

+    # ********************************************************************************
+    def find_ra_dec_offset(self, filename):
+        """ Match the filename in the offset list with input_model.meta.filename and return
+        the corresponding Ra and Dec offset
+        """
+
+        index = self.offsets['filename'].index(filename)
+        raoffset = self.offsets['raoffset'][index]
+        decoffset = self.offsets['decoffset'][index]
+        return raoffset, decoffset
+
+    # ********************************************************************************
+    def offset_coord(self, ra, dec, raoffset, decoffset):
+        coord = SkyCoord(ra, dec, unit='deg')
+        coord_new = coord.spherical_offsets_by(raoffset, decoffset)
+
+        ra_new = coord_new.ra.value
+        dec_new = coord_new.dec.value
+        return ra_new, dec_new
+

 class IncorrectInput(Exception):
     """ Raises an exception if input parameter, Interpolation, is set to area
     when more than one file is used to build the cube.

diff --git a/jwst/cube_build/ifuoffset.schema.yaml b/jwst/cube_build/ifuoffset.schema.yaml
index c2c7d40c6a..cd9d2737a4 100644
--- a/jwst/cube_build/ifuoffset.schema.yaml
+++ b/jwst/cube_build/ifuoffset.schema.yaml
@@ -7,9 +7,8 @@ type: object
 properties:
   units:
     description: Units of the ra and dec offset values.
-    anyOf:
-      - type: string
-      - $ref: http://stsci.edu/schemas/asdf/unit/unit-1.0.0
+    type: string
+    enum: ['arcsec']
   filename:
     description: list of filenames
     type: array

diff --git a/jwst/cube_build/tests/test_offset.py b/jwst/cube_build/tests/test_offset.py
index 275764bba2..58149a5bd5 100644
--- a/jwst/cube_build/tests/test_offset.py
+++ b/jwst/cube_build/tests/test_offset.py
@@ -158,8 +158,9 @@ def test2_offset_file_config(tmp_cwd, miri_ifushort_short_2files, offset_file):
     miri_ifushort_short_2files[0].meta.filename = 'test3.fits'
     step.offset_file = offset_file
-    offsets = step.check_offset_file()
-    assert offsets is None
+
+    with pytest.raises(Exception):
+        offsets = step.check_offset_file()


 def test_offset_file_units(tmp_cwd, miri_ifushort_short_2files, offset_file_arcmin):
     step.input_models = miri_ifushort_short_2files
     step.offset_file = offset_file_arcmin

-    offsets = step.check_offset_file()
-    assert offsets is None
-
+    with pytest.raises(Exception):
+        offsets = step.check_offset_file()

 def test_read_offset_file(miri_ifushort_short_2files, offset_file):
     """ Test offset file has been read in correctly"""

@@ -276,15 +276,15 @@ def test_read_offset_file(miri_ifushort_short_2files, offset_file):
     raoffset = [0.0, 0.1]
     decoffset = [0.0, 0.15]

-    ravalues = thiscube.offsets['raoffset']
+    ravalues = thiscube.offsets['raoffset']
     decvalues = thiscube.offsets['decoffset']

     assert thiscube.offsets['filename'] == filename

-    assert math.isclose(ravalues[0]*3600.0, raoffset[0], abs_tol=0.0001)
-    assert math.isclose(ravalues[1]*3600.0, raoffset[1], abs_tol=0.0001)
-    assert math.isclose(decvalues[0]*3600.0, decoffset[0], abs_tol=0.0001)
-    assert math.isclose(decvalues[1]*3600.0, decoffset[1], abs_tol=0.0001)
+    assert math.isclose(ravalues[0].value, raoffset[0], abs_tol=0.0001)
+    assert math.isclose(ravalues[1].value, raoffset[1], abs_tol=0.0001)
+    assert math.isclose(decvalues[0].value, decoffset[0], abs_tol=0.0001)
+    assert math.isclose(decvalues[1].value, decoffset[1], abs_tol=0.0001)

From 3c51fca28cd7b24983c2e71ead92a7d66c489916 Mon Sep 17 00:00:00 2001
From: jemorrison
Date: Thu, 19 Sep 2024 09:39:21 -0700
Subject: [PATCH 33/39] update comments

---
 jwst/cube_build/cube_build_step.py | 17 ++++++++++-------
 jwst/cube_build/ifu_cube.py        |  7 +++++--
 2 files changed, 15 insertions(+), 9 deletions(-)

diff --git a/jwst/cube_build/cube_build_step.py b/jwst/cube_build/cube_build_step.py
index 30da89f019..33a349eb8c 100755
--- a/jwst/cube_build/cube_build_step.py
+++ b/jwst/cube_build/cube_build_step.py
@@ -64,7 +64,7 @@ class CubeBuildStep (Step):
          search_output_file = boolean(default=false)
          output_use_model = boolean(default=true) # Use filenames in the output models
          suffix = string(default='s3d')
-         offset_file = string(default=None)
+         offset_file = string(default=None) # Filename containing a list of Ra and Dec offsets to apply to files.
          debug_spaxel = string(default='-1 -1 -1') # Default not used
        """

@@ -239,7 +239,7 @@ def process(self, input):
 # ________________________________________________________________________________
 # If an offset file is provided do some basic checks on the file and its contents.
 # The offset list contains a matching list to the files in the association
-# used in calspec3 (or offline cube building).
+# used in calspec3 (for offline cube building).
 # Each row in the offset list contain a filename, ra offset and dec offset.
 # The offset list is an asdf file.
         self.offsets = None

@@ -546,15 +546,15 @@ def read_user_input(self):
 # ________________________________________________________________________________
     def check_offset_file(self):
-        """Read in an optional ra and dec offsets for each file.
+        """Read in an optional ra and dec offset for each file.

        Summary
        ----------
        Check that the file is an asdf file.
-        check the file has the correct format:
+        Check the file has the correct format using a local schema file.
+        The schema file, ifuoffset.schema.yaml, is located in the jwst/cube_build directory.
        For each file in the input association check that there is a
        corresponding file in the offset file.
-        Also check that each file in the offset list contain a ra offset and dec offset.
        """

@@ -565,7 +565,8 @@ def check_offset_file(self):
        try:
            af = asdf.open(self.offset_file, custom_schema=DATA_PATH/'ifuoffset.schema.yaml')
        except:
            schema_message = ('Validation Error for offset file. Fix the offset file. \n' + \
-                'The offset file needs to have the same number of elements in the filename, raoffset and decoffset lists.\n' +\
+                'The offset file needs to have the same number of elements ' + \
+                'in the three lists: filename, raoffset and decoffset.\n' +\
                'The units need to be provided and only arcsec is allowed.')
            raise Exception(schema_message)

@@ -573,7 +574,9 @@ def check_offset_file(self):
        offset_filename = af['filename']
        offset_ra = af['raoffset']
        offset_dec = af['decoffset']
-
+        # Note:
+        # af['units'] is checked by the schema validation. It must be arcsec or a validation error occurs.
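+        # For reference, a valid offset file can be built with a few lines of
+        # asdf (a sketch; the filenames and offset values are illustrative):
+        #     import asdf
+        #     tree = {'units': 'arcsec',
+        #             'filename': ['test1.fits', 'test2.fits'],
+        #             'raoffset': [0.0, 0.1],
+        #             'decoffset': [0.0, 0.15]}
+        #     asdf.AsdfFile(tree).write_to('offsets.asdf')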
+        # check that all the file names in input_model are in the offset filename
        for model in self.input_models:
            file_check = model.meta.filename

diff --git a/jwst/cube_build/ifu_cube.py b/jwst/cube_build/ifu_cube.py
index c94b41fa69..3512e03542 100644
--- a/jwst/cube_build/ifu_cube.py
+++ b/jwst/cube_build/ifu_cube.py
@@ -1741,8 +1741,7 @@ def map_miri_pixel_to_sky(self, input_model, this_par1, subtract_background,
         if offsets is not None:
             raoffset, decoffset = self.find_ra_dec_offset(input_model.meta.filename)
             log.info("Ra and Dec offset (arc seconds) applied to file :%8.6f, %8.6f, %s",
-                     raoffset.value,
-                     decoffset.value, input_model.meta.filename)
+                     raoffset.value, decoffset.value, input_model.meta.filename)

         # check if background sky matching has been done in mrs_imatch step
         # If it has not been subtracted and the background has not been

@@ -2449,6 +2448,10 @@ def find_ra_dec_offset(self, filename):

     # ********************************************************************************
     def offset_coord(self, ra, dec, raoffset, decoffset):
+        """ Given an ra, dec position and an ra, dec offset, use astropy SkyCoord functions
+        to apply the offset
+        """
+
         coord = SkyCoord(ra, dec, unit='deg')
         coord_new = coord.spherical_offsets_by(raoffset, decoffset)

From 45d15f1f009d6f44dc46c7131943f23b2d6f9cec Mon Sep 17 00:00:00 2001
From: jemorrison
Date: Thu, 19 Sep 2024 10:00:46 -0700
Subject: [PATCH 34/39] fix comment

---
 jwst/cube_build/cube_build_step.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/jwst/cube_build/cube_build_step.py b/jwst/cube_build/cube_build_step.py
index 33a349eb8c..6ee50d3f1b 100755
--- a/jwst/cube_build/cube_build_step.py
+++ b/jwst/cube_build/cube_build_step.py
@@ -240,7 +240,6 @@ def process(self, input):
 # If an offset file is provided do some basic checks on the file and its contents.
 # The offset list contains a matching list to the files in the association
 # used in calspec3 (for offline cube building).
-# Each row in the offset list contain a filename, ra offset and dec offset.
 # The offset list is an asdf file.
         self.offsets = None

From 04cbf6846b7fecb0d15155e55b69e8d9c3c45109 Mon Sep 17 00:00:00 2001
From: jemorrison
Date: Thu, 19 Sep 2024 10:12:36 -0700
Subject: [PATCH 35/39] cube_build_step.py

---
 jwst/cube_build/cube_build_step.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/jwst/cube_build/cube_build_step.py b/jwst/cube_build/cube_build_step.py
index 6ee50d3f1b..d9678cc7a4 100755
--- a/jwst/cube_build/cube_build_step.py
+++ b/jwst/cube_build/cube_build_step.py
@@ -583,7 +583,7 @@ def check_offset_file(self):
                 continue
             else:
                 af.close()
-                raise Exception('Error in offset file. A file in association is not found in offset list %s', file_check)
+                raise ValueError('Error in offset file. A file in the association is not found in offset list %s' % file_check)

         # check that all the lists have the same length
         len_file = len(offset_filename)
@@ -591,7 +591,7 @@ def check_offset_file(self):
         len_dec = len(offset_dec)
         if (len_file != len_ra or len_ra != len_dec or len_file != len_dec):
             af.close()
-            raise Exception('Offset file error. The offset file does not have the same number of values for filename, offset_ra, offset_dec')
+            raise ValueError('The offset file does not have the same number of values for filename, raoffset, decoffset')

From 8e8540eef554818134021356b1b7596b989479b5 Mon Sep 17 00:00:00 2001
From: jemorrison
Date: Thu, 19 Sep 2024 14:21:31 -0700
Subject: [PATCH 36/39] fix test test_offset.py ruff issue

---
 jwst/cube_build/tests/test_offset.py | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/jwst/cube_build/tests/test_offset.py b/jwst/cube_build/tests/test_offset.py
index 58149a5bd5..e14da0fdf3 100644
--- a/jwst/cube_build/tests/test_offset.py
+++ b/jwst/cube_build/tests/test_offset.py
@@ -3,7 +3,6 @@
 """
 import pytest
-import sys
 import math
 import asdf
 from stdatamodels.jwst import datamodels

@@ -159,8 +158,8 @@ def test2_offset_file_config(tmp_cwd, miri_ifushort_short_2files, offset_file):
     miri_ifushort_short_2files[0].meta.filename = 'test3.fits'
     step.offset_file = offset_file

-    with pytest.raises(Exception):
-        offsets = step.check_offset_file()
+    with pytest.raises(ValueError):
+        step.check_offset_file()


 def test_offset_file_units(tmp_cwd, miri_ifushort_short_2files, offset_file_arcmin):

@@ -172,7 +171,7 @@ def test_offset_file_units(tmp_cwd, miri_ifushort_short_2files, offset_file_arcm
     step.offset_file = offset_file_arcmin

     with pytest.raises(Exception):
-        offsets = step.check_offset_file()
+        step.check_offset_file()

 def test_read_offset_file(miri_ifushort_short_2files, offset_file):
     """ Test offset file has been read in correctly"""

From dfa83a7ae0056922067e6f258220b52985834114 Mon Sep 17 00:00:00 2001
From: jemorrison
Date: Thu, 19 Sep 2024 14:25:11 -0700
Subject: [PATCH 37/39] fix test

---
 jwst/cube_build/tests/test_configuration.py | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/jwst/cube_build/tests/test_configuration.py b/jwst/cube_build/tests/test_configuration.py
index 9a9eee11db..b98fa754fb 100644
--- a/jwst/cube_build/tests/test_configuration.py
+++ b/jwst/cube_build/tests/test_configuration.py
@@ -3,11 +3,8 @@
 """
 import pytest
-import asdf
 from stdatamodels.jwst import datamodels
-import astropy.units as u

-from jwst.cube_build import CubeBuildStep
 from jwst.cube_build import cube_build
 from jwst.cube_build import file_table

From 4d60e7a08b69edf3d01db9afcbbc5d0377dc1404 Mon Sep 17 00:00:00 2001
From: Maria
Date: Fri, 20 Sep 2024 13:48:45 -0400
Subject: [PATCH 38/39] JP-3695: Clean up unnecessary copies (#8676)

Co-authored-by: Tyler Pauly
---
 CHANGES.rst                                   |  88 ++++++++++-
 docs/jwst/emicorr/arguments.rst               |   3 -
 jwst/charge_migration/charge_migration.py     |  11 +-
 .../charge_migration/charge_migration_step.py |  19 +--
 jwst/dark_current/dark_current_step.py        |  38 +++--
 jwst/dq_init/dq_init_step.py                  |  27 ++--
 jwst/dq_init/dq_initialization.py             |  15 +-
 jwst/emicorr/emicorr.py                       |  66 ++++----
 jwst/emicorr/emicorr_step.py                  |  32 ++--
 jwst/emicorr/tests/test_emicorr.py            |   8 +-
 jwst/firstframe/firstframe_step.py            |  25 +--
 jwst/firstframe/firstframe_sub.py             |  12 +-
 jwst/firstframe/tests/test_firstframe.py      |   5 +-
 jwst/gain_scale/gain_scale.py                 |   7 +-
 jwst/gain_scale/gain_scale_step.py            |  17 +-
 jwst/gain_scale/tests/test_gain_scale.py      |   3 +-
 jwst/group_scale/group_scale.py               |   2 +
 jwst/group_scale/group_scale_step.py          |  33 ++--
 jwst/ipc/ipc_corr.py                          |  33 ++--
 jwst/ipc/ipc_step.py                          |  23 +--
 jwst/jump/jump.py                             |  33 ++--
 jwst/jump/jump_step.py                        |  28 ++--
 jwst/lastframe/lastframe_step.py              |  24 +--
 jwst/lastframe/lastframe_sub.py               |   9 +-
 jwst/lastframe/tests/test_lastframe.py        |   5 +-
 jwst/linearity/linearity.py                   |  15 +-
 jwst/linearity/linearity_step.py              |  23 +--
 jwst/persistence/persistence_step.py          | 146 +++++++++---------
 jwst/pipeline/calwebb_detector1.py            |   2 +-
 jwst/ramp_fitting/ramp_fit_step.py            |  47 +++---
 jwst/refpix/irs2_subtract_reference.py        |  21 ++-
 jwst/refpix/refpix_step.py                    |  42 ++---
 jwst/regtest/test_miri_image.py               |  35 +++++
 jwst/reset/reset_step.py                      |  53 ++++---
 jwst/reset/reset_sub.py                       |  21 ++-
 jwst/reset/tests/test_reset_sub.py            |   5 +-
 jwst/rscd/rscd_step.py                        |  47 +++---
 jwst/rscd/rscd_sub.py                         |  55 +++----
 jwst/rscd/tests/test_rscd.py                  |   8 +-
 jwst/saturation/saturation.py                 |  48 +++---
 jwst/saturation/saturation_step.py            |  26 ++--
 jwst/stpipe/core.py                           |   1 +
 jwst/superbias/bias_sub.py                    |  13 +-
 jwst/superbias/superbias_step.py              |  22 +--
 jwst/superbias/tests/test_bias_sub.py         |  12 +-
 45 files changed, 672 insertions(+), 536 deletions(-)

diff --git a/CHANGES.rst b/CHANGES.rst
index f269d7a904..1a61f13086 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -63,6 +63,11 @@ calwebb_detector1
 - Added the optional ``clean_flicker_noise`` step between ``jump`` and
   ``ramp_fit``. [#8669]

+charge_migration
+----------------
+
+- Removed unnecessary copies, and created a single copy at step.py level. [#8676]
+
 clean_flicker_noise
 -------------------

@@ -91,6 +96,11 @@ datamodels
 - Added `ModelLibrary` class to allow passing on-disk models between steps
   in the image3 pipeline. [#8683]

+dark_current
+------------
+
+- Removed unnecessary copies, and created a single copy at step.py level. [#8676]
+
 documentation
 -------------

@@ -99,12 +109,24 @@ documentation
 - Updated description of association keyword `expname`: including path
   information in addition to the filename is discouraged, but allowed. [#8789]

+dq_init
+--------
+
+- Removed unnecessary copies, and created a single copy at step.py level. [#8676]
+
 emicorr
 -------

 - Fixed a bug where MIRI EMI correction step would return NaNs when it was
   unable to compute a correction. [#8675]

+- Removed unnecessary copies, and created a single copy at step.py level. [#8676]
+
+firstframe
+-----------
+
+- Removed unnecessary copies, and created a single copy at step.py level. [#8676]
+
 flat_field
 ----------

@@ -113,6 +135,11 @@ flat_field

 - Replaced deep copies of NIRSpec WCS objects within most loops [#8793]

+gain_scale
+----------
+
+- Removed unnecessary copies, and created a single copy at step.py level. [#8676]
+
 general
 -------

@@ -126,12 +153,37 @@ general

 - bump dependency to use ``stcal 1.9.0`` [#8808]

+group_scale
+-----------
+
+- Removed unnecessary copies, and created a single copy at step.py level. [#8676]
+
+ipc
+---
+
+- Removed unnecessary copies, and created a single copy at step.py level. [#8676]
+
+jump
+----
+
+- Removed unnecessary copies, and created a single copy at step.py level. [#8676]
+
 klip
 ----

 - Allowed klip to ingest a single shifted 3-D PSF model instead of a 4-D
   structure containing one shifted PSF per science integration. [#8747]

+lastframe
+---------
+
+- Removed unnecessary copies, and created a single copy at step.py level. [#8676]
+
+linearity
+---------
+
+- Removed unnecessary copies, and created a single copy at step.py level. [#8676]
+
 master_background
 -----------------

@@ -196,6 +248,11 @@ pathloss

 - Replaced deep copies of NIRSpec WCS objects within most loops [#8793]

+persistence
+-----------
+
+- Removed unnecessary copies, and created a single copy at step.py level. [#8676]
+
 photom
 ------

@@ -232,6 +289,18 @@ ramp_fitting
 - Updated the flow of the detector 1 pipeline when selecting the ``LIKELY`` algorithm
   for ramp fitting. The ramps must contain a minimum number of groups (4).[#8631]

+- Removed unnecessary copies, and created a single copy at step.py level. [#8676]
+
+refpix
+------
+
+- Removed unnecessary copies, and created a single copy at step.py level. [#8676]
+
+regtest
+-------
+
+- Added memory usage test for Detector1 pipeline. [#8676]
+
 resample
 --------

@@ -245,6 +314,11 @@ resample
 - Ensure that NaNs and DO_NOT_USE flags match up in all input data before
   resampling. [#8557]

+reset
+-----
+
+- Removed unnecessary copies, and created a single copy at step.py level. [#8676]
+
 resample_spec
 -------------

@@ -269,12 +343,19 @@ resample_spec
 - Ensure that NaNs and DO_NOT_USE flags match up in all input data before
   resampling. [#8557]

+rscd
+----
+
+- Removed unnecessary copies, and created a single copy at step.py level. [#8676]
+
 saturation
 ----------

 - Add option for using the readout pattern information to improve saturation
   flagging in grouped data. [#8731]
-
+
+- Removed unnecessary copies, and created a single copy at step.py level. [#8676]
+
 scripts
 -------

@@ -301,6 +382,11 @@ stpipe

 - Log jwst version at end of `Step.run`. [#8769]

+superbias
+---------
+
+- Removed unnecessary copies, and created a single copy at step.py level. [#8676]
+
 tso_photometry
 --------------

diff --git a/docs/jwst/emicorr/arguments.rst b/docs/jwst/emicorr/arguments.rst
index bb0510fb14..b4000e8997 100644
--- a/docs/jwst/emicorr/arguments.rst
+++ b/docs/jwst/emicorr/arguments.rst
@@ -12,9 +12,6 @@ The ``emicorr`` step has the following step-specific arguments.
     If True, the reference wavelength will be scaled to the data's
     phase amplitude.

-``--user_supplied_reffile`` (boolean, default=None)
-    This is to specify an ASDF-format user-created reference file.
-
 ``--save_intermediate_results`` (string, default=False)
     This is a boolean flag to specify whether to write a step output
     file with the EMI correction, and a reference file with all the

diff --git a/jwst/charge_migration/charge_migration.py b/jwst/charge_migration/charge_migration.py
index ccfcc38366..aa5e909468 100644
--- a/jwst/charge_migration/charge_migration.py
+++ b/jwst/charge_migration/charge_migration.py
@@ -16,13 +16,13 @@
 CHLO_DNU = CHLO + DNU


-def charge_migration(input_model, signal_threshold):
+def charge_migration(output_model, signal_threshold):
     """
     Correct for charge migration

     Parameters
     ----------
-    input_model : `~jwst.datamodels.RampModel`
+    output_model : `~jwst.datamodels.RampModel`
         The input science data to be corrected

     signal_threshold : float
@@ -35,11 +35,8 @@ def charge_migration(input_model, signal_threshold):
         DO_NOT_USE flags to groups exceeding signal_threshold
     """
-    data = input_model.data
-    gdq = input_model.groupdq
-
-    # Create the output model as a copy of the input
-    output_model = input_model.copy()
+    data = output_model.data
+    gdq = output_model.groupdq

     log.info('Using signal_threshold: %.2f', signal_threshold)

diff --git a/jwst/charge_migration/charge_migration_step.py b/jwst/charge_migration/charge_migration_step.py
index e788e4a182..a110e4610b 100755
--- a/jwst/charge_migration/charge_migration_step.py
+++ b/jwst/charge_migration/charge_migration_step.py
@@ -1,9 +1,8 @@
 #! /usr/bin/env python
 import logging
-from ..stpipe import Step
-
 from stdatamodels.jwst import datamodels
+from ..stpipe import Step
 from . import charge_migration

@@ -25,22 +24,24 @@ class ChargeMigrationStep(Step):
         skip = boolean(default=True)
     """

-    def process(self, input):
+    def process(self, step_input):

         # Open the input data model
-        with datamodels.RampModel(input) as input_model:
+        with datamodels.RampModel(step_input) as input_model:
+
             if (input_model.data.shape[1] < 3):
                 # skip step if only 1 or 2 groups/integration
                 self.log.info('Too few groups per integration; skipping charge_migration')
-
-                result = input_model
-                result.meta.cal_step.charge_migration = 'SKIPPED'
-                return result
+                input_model.meta.cal_step.charge_migration = 'SKIPPED'
+                return input_model
+
+            # Work on a copy
+            result = input_model.copy()

             # Retrieve the parameter value(s)
             signal_threshold = self.signal_threshold

-            result = charge_migration.charge_migration(input_model, signal_threshold)
+            result = charge_migration.charge_migration(result, signal_threshold)
             result.meta.cal_step.charge_migration = 'COMPLETE'

         return result

diff --git a/jwst/dark_current/dark_current_step.py b/jwst/dark_current/dark_current_step.py
index a7ae3c2761..0bf01cfdb8 100755
--- a/jwst/dark_current/dark_current_step.py
+++ b/jwst/dark_current/dark_current_step.py
@@ -23,10 +23,10 @@ class DarkCurrentStep(Step):

     reference_file_types = ['dark']

-    def process(self, input):
+    def process(self, step_input):

         # Open the input data model
-        with datamodels.RampModel(input) as input_model:
+        with datamodels.RampModel(step_input) as input_model:

             # Get the name of the dark reference file to use
             self.dark_name = self.get_reference_file(input_model, 'dark')
@@ -36,9 +36,11 @@ def process(self, input):
             if self.dark_name == 'N/A':
                 self.log.warning('No DARK reference file found')
                 self.log.warning('Dark current step will be skipped')
-                result = input_model.copy()
-                result.meta.cal_step.dark = 'SKIPPED'
-                return result
+                input_model.meta.cal_step.dark = 'SKIPPED'
+                return input_model
+
+            # Work on a copy
+            result = input_model.copy()

             # Create name for the intermediate dark, if desired.
             dark_output = self.dark_output
@@ -49,7 +51,7 @@ def process(self, input):
             )

             # Open the dark ref file data model - based on Instrument
-            instrument = input_model.meta.instrument.name
+            instrument = result.meta.instrument.name
             if instrument == 'MIRI':
                 dark_model = datamodels.DarkMIRIModel(self.dark_name)
             else:
@@ -58,20 +60,23 @@ def process(self, input):
             # Store user-defined average_dark_current in model, if provided
             # A user-defined value will take precedence over any value present
             # in dark reference file
-            self.set_average_dark_current(input_model, dark_model)
+            self.set_average_dark_current(result, dark_model)

             # Do the dark correction
-            result = dark_sub.do_correction(
-                input_model, dark_model, dark_output
-            )
+            correction = dark_sub.do_correction(
+                result, dark_model, dark_output
+            )

-            out_data, dark_data = result
+            out_data, dark_data = correction

             if dark_data is not None and dark_data.save:
                 save_dark_data_as_dark_model(dark_data, dark_model, instrument)
-            dark_model.close()

-            out_ramp = dark_output_data_2_ramp_model(out_data, input_model)
+            out_ramp = dark_output_data_2_ramp_model(out_data, result)
+
+            # Cleanup
+            del dark_model
+            del result

         return out_ramp

@@ -140,7 +145,7 @@ def save_dark_data_as_dark_model(dark_data, dark_model, instrument):
     out_dark_model.close()


-def dark_output_data_2_ramp_model(out_data, input_model):
+def dark_output_data_2_ramp_model(out_data, out_model):
     """
     Convert computed output data from the dark step to a RampModel.
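The convention this patch applies across the detector1 steps: on every skip
path the step returns the input model itself with the cal_step flag set to
SKIPPED and makes no copy; otherwise it makes exactly one copy at the step
level and corrects that copy in place. A toy sketch of the flow (stand-in
model class and correction, not jwst code):

    import copy

    class ToyModel:
        """Stand-in for a JWST datamodel: just data plus a cal_step flag."""
        def __init__(self, data):
            self.data = data
            self.cal_step = None
        def copy(self):
            return copy.deepcopy(self)

    def process(input_model, reffile_found=True):
        if not reffile_found:
            input_model.cal_step = 'SKIPPED'   # skip path: no copy made
            return input_model
        result = input_model.copy()            # the single step-level copy
        result.data = [v - 1.0 for v in result.data]  # stand-in correction
        result.cal_step = 'COMPLETE'
        return result

    print(process(ToyModel([3.0, 4.0])).data)  # [2.0, 3.0]

Deferring the copy until after all skip checks have passed is where the memory
saving comes from: skipped exposures are handed back untouched.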
@@ -149,7 +154,7 @@ def dark_output_data_2_ramp_model(out_data, out_model):
     out_data: ScienceData
         Computed science data from the dark current step.

-    input_model: RampModel
+    out_model: RampModel
         The input ramp model from which to subtract the dark current.

     Return
@@ -161,10 +166,9 @@ def dark_output_data_2_ramp_model(out_data, out_model):
     if out_data.cal_step == "SKIPPED":
         # If processing was skipped in the lower-level routines,
         # just return the unmodified input model
-        input_model.meta.cal_step.dark_sub = "SKIPPED"
-        return input_model
+        out_model.meta.cal_step.dark_sub = "SKIPPED"
+        return out_model
     else:
-        out_model = input_model.copy()
         out_model.meta.cal_step.dark_sub = out_data.cal_step
         out_model.data = out_data.data
         out_model.groupdq = out_data.groupdq

diff --git a/jwst/dq_init/dq_init_step.py b/jwst/dq_init/dq_init_step.py
index ead63fda7c..1f62970454 100644
--- a/jwst/dq_init/dq_init_step.py
+++ b/jwst/dq_init/dq_init_step.py
@@ -21,9 +21,11 @@ class DQInitStep(Step):

     class_alias = "dq_init"

+    spec = """
+    """
     reference_file_types = ['mask']

-    def process(self, input):
+    def process(self, step_input):
         """Perform the dq_init calibration step

         Parameters
@@ -39,20 +41,19 @@ def process(self, step_input):

         # Try to open the input as a regular RampModel
         try:
-            input_model = datamodels.RampModel(input)
-
+            input_model = datamodels.RampModel(step_input)
             # Check to see if it's Guider raw data
             if input_model.meta.exposure.type in dq_initialization.guider_list:
                 # Reopen as a GuiderRawModel
                 input_model.close()
-                input_model = datamodels.GuiderRawModel(input)
+                input_model = datamodels.GuiderRawModel(step_input)
                 self.log.info("Input opened as GuiderRawModel")

         except (TypeError, ValueError):
             # If the initial open attempt fails,
             # try to open as a GuiderRawModel
             try:
-                input_model = datamodels.GuiderRawModel(input)
+                input_model = datamodels.GuiderRawModel(step_input)
                 self.log.info("Input opened as GuiderRawModel")
             except (TypeError, ValueError):
                 self.log.error("Unexpected or unknown input model type")
@@ -68,18 +69,20 @@ def process(self, step_input):
         if self.mask_filename == 'N/A':
             self.log.warning('No MASK reference file found')
             self.log.warning('DQ initialization step will be skipped')
-            result = input_model.copy()
-            result.meta.cal_step.dq_init = 'SKIPPED'
-            return result
+            input_model.meta.cal_step.dq_init = 'SKIPPED'
+            return input_model
+
+        # Work on a copy
+        result = input_model.copy()

         # Load the reference file
         mask_model = datamodels.MaskModel(self.mask_filename)

         # Apply the step
-        result = dq_initialization.correct_model(input_model, mask_model)
+        result = dq_initialization.correct_model(result, mask_model)

-        # Close the data models for the input and ref file
-        input_model.close()
-        mask_model.close()
+        # Cleanup
+        del mask_model
+        del input_model

         return result

diff --git a/jwst/dq_init/dq_initialization.py b/jwst/dq_init/dq_initialization.py
index d07c7e6794..9c67d16a35 100644
--- a/jwst/dq_init/dq_initialization.py
+++ b/jwst/dq_init/dq_initialization.py
@@ -38,12 +38,12 @@ def correct_model(input_model, mask_model):
     return output_model


-def do_dqinit(input_model, mask_model):
+def do_dqinit(output_model, mask_model):
     """Perform the dq_init step on a JWST datamodel

     Parameters
     ----------
-    input_model : input JWST datamodel
+    output_model : input JWST datamodel
         The jwst datamodel to be corrected

     mask_model : mask datamodel
@@ -56,10 +56,7 @@ def do_dqinit(output_model, mask_model):
     """

     # Inflate empty DQ array, if necessary
-    check_dimensions(input_model)
-
-    # Create output model as copy of input
-    output_model = input_model.copy()
+    check_dimensions(output_model)

     # Extract subarray from reference data, if necessary
     if reffile_utils.ref_matches_sci(output_model, mask_model):

@@ -72,11 +69,11 @@ def do_dqinit(output_model, mask_model):
         mask_sub_model.close()

     # Set model-specific data quality in output
-    if input_model.meta.exposure.type in guider_list:
-        dq = np.bitwise_or(input_model.dq, mask_array)
+    if output_model.meta.exposure.type in guider_list:
+        dq = np.bitwise_or(output_model.dq, mask_array)
         output_model.dq = dq
     else:
-        dq = np.bitwise_or(input_model.pixeldq, mask_array)
+        dq = np.bitwise_or(output_model.pixeldq, mask_array)
         output_model.pixeldq = dq
     # Additionally, propagate mask DO_NOT_USE flags to groupdq to
     # ensure no ramps are fit to bad pixels.

diff --git a/jwst/emicorr/emicorr.py b/jwst/emicorr/emicorr.py
index 099d5e908f..3c714a1e7d 100644
--- a/jwst/emicorr/emicorr.py
+++ b/jwst/emicorr/emicorr.py
@@ -79,7 +79,6 @@ def do_correction(input_model, emicorr_model, save_onthefly_reffile, **pars):
         will operate. Valid parameters include:
             save_intermediate_results - saves the output into a file and the
                                         reference file (if created on-the-fly)
-            user_supplied_reffile - reference file supplied by the user

     Returns
     -------
@@ -88,7 +87,6 @@ def do_correction(input_model, emicorr_model, save_onthefly_reffile, **pars):
     """
     save_intermediate_results = pars['save_intermediate_results']
-    user_supplied_reffile = pars['user_supplied_reffile']
     nints_to_phase = pars['nints_to_phase']
     nbins = pars['nbins']
     scale_reference = pars['scale_reference']
@@ -98,7 +96,6 @@ def do_correction(input_model, emicorr_model, save_onthefly_reffile, **pars):
     output_model = apply_emicorr(input_model, emicorr_model,
                                  onthefly_corr_freq, save_onthefly_reffile,
                                  save_intermediate_results=save_intermediate_results,
-                                 user_supplied_reffile=user_supplied_reffile,
                                  nints_to_phase=nints_to_phase,
                                  nbins_all=nbins,
                                  scale_reference=scale_reference,
@@ -108,11 +105,10 @@ def do_correction(input_model, emicorr_model, save_onthefly_reffile, **pars):
     return output_model


-def apply_emicorr(input_model, emicorr_model,
+def apply_emicorr(output_model, emicorr_model,
                   onthefly_corr_freq, save_onthefly_reffile,
-                  save_intermediate_results=False, user_supplied_reffile=None,
-                  nints_to_phase=None, nbins_all=None, scale_reference=True,
-                  use_n_cycles=3):
+                  save_intermediate_results=False, nints_to_phase=None,
+                  nbins_all=None, scale_reference=True, use_n_cycles=3):
     """
     -> NOTE: This is translated from IDL code fix_miri_emi.pro

     Parameters
     ----------
-    input_model : `~jwst.datamodels.JwstDataModel`
+    output_model : `~jwst.datamodels.JwstDataModel`
         input science data model to be emi-corrected

     emicorr_model : `~jwst.datamodels.EmiModel`

     save_intermediate_results : bool
         Saves the output into a file and the reference file (if created on-the-fly)

-    user_supplied_reffile : str
-        Reference file supplied by the user
-
     nints_to_phase : int
         Number of integrations to phase

         input science data model which has been emi-corrected
     """
     # get the subarray case and other info
-    detector = input_model.meta.instrument.detector
-    subarray = input_model.meta.subarray.name
-    readpatt = input_model.meta.exposure.readpatt
-    xsize = input_model.meta.subarray.xsize  # SUBSIZE1 keyword
-    xstart = input_model.meta.subarray.xstart  # SUBSTRT1 keyword
+    detector = output_model.meta.instrument.detector
+    subarray = output_model.meta.subarray.name
+    readpatt = output_model.meta.exposure.readpatt
+    xsize = output_model.meta.subarray.xsize  # SUBSIZE1 keyword
+    xstart = output_model.meta.subarray.xstart  # SUBSTRT1 keyword
+    # get the number of samples, 10us sample times per pixel (1 for fastmode, 9 for slowmode)
+    nsamples = output_model.meta.exposure.nsamples

     # get the subarray case from either the ref file or set default values
     freqs_numbers = []
@@ -216,11 +211,7 @@ def apply_emicorr(output_model, emicorr_model,
         # no subarray or read pattern match found, print to log and skip correction
         return subname

-    # get the number of samples, 10us sample times per pixel (1 for fastmode, 9 for slowmode)
-    nsamples = input_model.meta.exposure.nsamples
-
-    # Initialize the output model as a copy of the input
-    output_model = input_model.copy()
     nints, ngroups, ny, nx = np.shape(output_model.data)

     # create the dictionary to store the frequencies and corresponding phase amplitudes
@@ -235,8 +226,7 @@ def apply_emicorr(output_model, emicorr_model,
         log.info('Correcting for frequency: {} Hz  ({} out of {})'.format(frequency, fi+1, len(freqs2correct)))

         # Read image data and set up some variables
-        orig_data = output_model.data
-        data = orig_data.copy()
+        data = output_model.data.copy()

         # Correspondence of array order in IDL
         # sz[0] = 4 in idl
         # sz[4] = nints
         nx4 = int(nx/4)

-        dd_all = np.zeros((nints, ngroups, ny, nx4))
+        dd_all = np.zeros((nints, ngroups, ny, nx4))
         log.info('Subtracting self-superbias from each group of each integration and')

         # Calculate times of all pixels in the input integration, then use that to calculate
@@ -287,7 +277,7 @@ def apply_emicorr(output_model, emicorr_model,
         # this number comes from the subarray definition (see subarray_cases dict above), but
         # calculate it from the input image header here just in case the subarray definitions
         # are not available to this routine.
-        colstop = int( xsize/4 + xstart - 1 )
+        colstop = int(xsize/4 + xstart - 1)

         log.info('doing phase calculation per integration')
         for ninti in range(nints):
@@ -301,7 +291,7 @@ def apply_emicorr(output_model, emicorr_model,

             # subtract source+sky from each frame of this ramp
             for ngroupi in range(ngroups):
-                data[ninti, ngroupi, ...] = orig_data[ninti, ngroupi, ...] - (s0 * ngroupi)
+                data[ninti, ngroupi, ...] = output_model.data[ninti, ngroupi, ...] - (s0 * ngroupi)

             # make a self-superbias
             m0 = minmed(data[ninti, 1:ngroups-1, :, :])
@@ -384,12 +374,12 @@ def apply_emicorr(output_model, emicorr_model,
             nb_over_nbins = [nb/nbins for nb in range(nbins)]
             nbp1_over_nbins = [(nb + 1)/nbins for nb in range(nbins)]
             # Construct a phase map and dd map for only the nints_to_phase
-            phase_temp = phaseall[0:nints_to_phase,:,:,:]
-            dd_temp = dd_all[0:nints_to_phase,:,:,:]
+            phase_temp = phaseall[0: nints_to_phase, :, :, :]
+            dd_temp = dd_all[0: nints_to_phase, :, :, :]
             for nb in range(nbins):
                 u = np.where((phase_temp > nb_over_nbins[nb]) & (phase_temp <= nbp1_over_nbins[nb]))
                 # calculate the sigma-clipped mean
-                dmean,_,_ = scs(dd_temp[u])
+                dmean, _, _ = scs(dd_temp[u])
                 pa[nb] = dmean   # amplitude in this bin
             pa -= np.median(pa)
@@ -441,32 +431,32 @@ def apply_emicorr(output_model, emicorr_model,
             lut = lut_reference

         if save_intermediate_results and save_onthefly_reffile is not None:
-            freq_pa_dict['frequencies'][frequency_name] = {'frequency' : frequency,
-                                                           'phase_amplitudes' : pa}
+            freq_pa_dict['frequencies'][frequency_name] = {'frequency': frequency,
+                                                           'phase_amplitudes': pa}

         log.info('Creating phased-matched noise model to subtract from data')
         # This is the phase matched noise model to subtract from each pixel of the input image
         dd_noise = lut[(phaseall * period_in_pixels).astype(int)]

         # Interleave (straight copy) into 4 amps
-        noise = np.ones((nints, ngroups, ny, nx))   # same size as input data
+        noise = np.zeros((nints, ngroups, ny, nx))   # same size as input data
         noise_x = np.arange(nx4) * 4
         for k in range(4):
             noise[:, :, :, noise_x + k] = dd_noise

         # Safety catch; anywhere the noise value is not finite, set it to zero
-        noise[~np.isfinite(noise)] = 0
+        noise[~np.isfinite(noise)] = 0.0

         # Subtract EMI noise from the input data
         log.info('Subtracting EMI noise from data')
-        corr_data = orig_data - noise
-        output_model.data = corr_data
+        output_model.data = output_model.data - noise

         # clean up
         del data
         del dd_all
         del times_this_int
         del phaseall
+        del noise

     if save_intermediate_results and save_onthefly_reffile is not None:
         if 'FAST' in readpatt:
         on_the_fly_subarr_case[subarray] = {
             'rowclocks': rowclocks,
             'frameclocks': frameclocks,
-            'freqs': freqs_dict }
+            'freqs': freqs_dict
+        }
         freq_pa_dict['subarray_cases'] = on_the_fly_subarr_case
         mk_reffile(freq_pa_dict, save_onthefly_reffile)

@@ -628,7 +619,7 @@ def get_subarcase(subarray_cases, subarray, readpatt, detector):
                 else:
                     if "SLOW" in readpatt and "SLOW" in item and detector in item:
                         frequencies.append(val)
-                    elif "FAST" in readpatt and "FAST" in item:
+                    elif "FAST" in readpatt and "FAST" in item:
                         frequencies.append(val)
             if subname is not None and rowclocks is not None and frameclocks is not None and frequencies is not None:
                 break
@@ -670,6 +661,7 @@ def get_frequency_info(freqs_names_vals, frequency_name):
             break
     return freq_number, phase_amplitudes

+
 def rebin(arr, newshape):
     """Rebin an array to a new shape.

@@ -718,5 +710,3 @@ def mk_reffile(freq_pa_dict, emicorr_ref_filename):
     emicorr_model.save(emicorr_ref_filename)
     emicorr_model.close()
     log.info('On-the-fly reference file written as: %s', emicorr_ref_filename)
-
-

diff --git a/jwst/emicorr/emicorr_step.py b/jwst/emicorr/emicorr_step.py
index 27d4966098..5f24c099f2 100755
--- a/jwst/emicorr/emicorr_step.py
+++ b/jwst/emicorr/emicorr_step.py
@@ -1,6 +1,5 @@
 #! /usr/bin/env python

-
 from stdatamodels.jwst import datamodels
 from ..stpipe import Step
 from . import emicorr

@@ -29,9 +28,10 @@ class EmiCorrStep(Step):

     reference_file_types = ['emicorr']

-    def process(self, input):
+    def process(self, step_input):
+
         # Open the input data model
-        with datamodels.open(input) as input_model:
+        with datamodels.open(step_input) as input_model:

             # Catch the cases to skip
             instrument = input_model.meta.instrument.name
@@ -47,10 +47,12 @@ def process(self, step_input):
                 input_model.meta.cal_step.emicorr = 'SKIPPED'
                 return input_model

+            # Work on a copy
+            result = input_model.copy()
+
             # Setup parameters
             pars = {
                 'save_intermediate_results': self.save_intermediate_results,
-                'user_supplied_reffile': self.user_supplied_reffile,
                 'nints_to_phase': self.nints_to_phase,
                 'nbins': self.nbins,
                 'scale_reference': self.scale_reference,
@@ -65,13 +67,13 @@ def process(self, step_input):
                 self.log.info('Correcting with reference file created on-the-fly.')

             elif self.user_supplied_reffile is None:
-                emicorr_ref_filename = self.get_reference_file(input_model, 'emicorr')
+                emicorr_ref_filename = self.get_reference_file(result, 'emicorr')
                 # Skip the step if no reference file is found
                 if emicorr_ref_filename == 'N/A':
                     self.log.warning('No reference file found.')
                     self.log.warning('EMICORR step will be skipped')
-                    input_model.meta.cal_step.emicorr = 'SKIPPED'
-                    return input_model
+                    result.meta.cal_step.emicorr = 'SKIPPED'
+                    return result
                 else:
                     self.log.info('Using CRDS reference file: {}'.format(emicorr_ref_filename))
                     emicorr_model = datamodels.EmiModel(emicorr_ref_filename)
@@ -88,15 +90,17 @@ def process(self, step_input):
                 save_onthefly_reffile = emicorr_ref_filename
             else:
                 save_onthefly_reffile = None
-            output_model = emicorr.do_correction(input_model, emicorr_model, save_onthefly_reffile, **pars)
-            if isinstance(output_model, str) or output_model is None:
+            result = emicorr.do_correction(result, emicorr_model, save_onthefly_reffile, **pars)
+            if isinstance(result, str) or result is None:
                 # in this case output_model=subarray_readpatt configuration
                 self.log.warning('No correction match for this configuration')
                 self.log.warning('Step skipped')
-                input_model.meta.cal_step.emicorr = 'SKIPPED'
-                return input_model
+                result.meta.cal_step.emicorr = 'SKIPPED'
+                return result
+
+            result.meta.cal_step.emicorr = 'COMPLETE'

-            # close and remove the reference file created on-the-fly
-            output_model.meta.cal_step.emicorr = 'COMPLETE'
+            # Cleanup
+            del emicorr_model

-        return output_model
+        return result

diff --git a/jwst/emicorr/tests/test_emicorr.py b/jwst/emicorr/tests/test_emicorr.py
index f7678ea332..412e6033e6 100644
--- a/jwst/emicorr/tests/test_emicorr.py
+++ b/jwst/emicorr/tests/test_emicorr.py
@@ -65,7 +65,6 @@ def test_do_correction():
     input_model = mk_data_mdl(data, 'MASK1550', 'FAST', 'MIRIMAGE')
     pars = {
         'save_intermediate_results': False,
-        'user_supplied_reffile': None,
         'nints_to_phase': None,
         'nbins': None,
         'scale_reference': True,
@@ -82,9 +81,8 @@ def test_apply_emicorr():
     data = np.ones((1, 5, 20, 20))
     input_model = mk_data_mdl(data, 'MASK1550', 'FAST', 'MIRIMAGE')
     emicorr_model, onthefly_corr_freq, save_onthefly_reffile = None, [218.3], None
-    outmdl = emicorr.apply_emicorr(input_model, emicorr_model,
-                                   onthefly_corr_freq, save_onthefly_reffile,
-                                   save_intermediate_results=False, user_supplied_reffile=None,
+    outmdl = emicorr.apply_emicorr(input_model, emicorr_model, onthefly_corr_freq,
+                                   save_onthefly_reffile, save_intermediate_results=False,
                                    nints_to_phase=None, nbins_all=None, scale_reference=True)
     assert outmdl is not None

@@ -135,6 +133,6 @@ def test_rebin():
     data[9] = 2.0
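+    # rebin() maps the 10 input samples onto the 7-element grid requested
+    # below by interpolation, so the spikes planted in `data` land, diluted,
+    # in shifted bins (explanatory note; expected values are in compare_arr).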
     rebinned_data = emicorr.rebin(data, [7])

-    compare_arr = np.array([1., 0.55, 1. , 1., 1.55, 1., 1.])
+    compare_arr = np.array([1., 0.55, 1., 1., 1.55, 1., 1.])

     assert compare_arr.all() == rebinned_data.all()

diff --git a/jwst/firstframe/firstframe_step.py b/jwst/firstframe/firstframe_step.py
index 47df912d48..47e9fcb72b 100755
--- a/jwst/firstframe/firstframe_step.py
+++ b/jwst/firstframe/firstframe_step.py
@@ -1,7 +1,6 @@
-from stdatamodels.jwst import datamodels
-
 from ..stpipe import Step
 from . import firstframe_sub
+from stdatamodels.jwst import datamodels

 __all__ = ["FirstFrameStep"]

@@ -16,20 +15,26 @@ class FirstFrameStep(Step):

     class_alias = "firstframe"

-    def process(self, input):
+    spec = """
+    """
+
+    def process(self, step_input):

         # Open the input data model
-        with datamodels.open(input) as input_model:
+        with datamodels.open(step_input) as input_model:

             # check the data is MIRI data
             detector = input_model.meta.instrument.detector.upper()
-            if detector[:3] == 'MIR':
-                # Do the firstframe correction subtraction
-                result = firstframe_sub.do_correction(input_model)
-            else:
+            if detector[:3] != 'MIR':
                 self.log.warning('First Frame Correction is only for MIRI data')
                 self.log.warning('First frame step will be skipped')
-                result = input_model.copy()
-                result.meta.cal_step.firstframe = 'SKIPPED'
+                input_model.meta.cal_step.firstframe = 'SKIPPED'
+                return input_model
+
+            # Work on a copy
+            result = input_model.copy()
+
+            # Do the firstframe correction subtraction
+            result = firstframe_sub.do_correction(result)

         return result

diff --git a/jwst/firstframe/firstframe_sub.py b/jwst/firstframe/firstframe_sub.py
index 0086b3cc0d..a535fb203e 100644
--- a/jwst/firstframe/firstframe_sub.py
+++ b/jwst/firstframe/firstframe_sub.py
@@ -1,7 +1,6 @@
 #
 #  Module for the firstframe correction for MIRI science data sets
 #
-
 import numpy as np
 import logging

@@ -11,7 +10,7 @@
 log.setLevel(logging.DEBUG)


-def do_correction(input_model):
+def do_correction(output):
     """
     Short Summary
     -------------

     Parameters
     ----------
-    input_model: data model object
+    output: data model object
         science data to be corrected

     Returns
     -------
     """

     # Save some data params for easy use later
-    sci_ngroups = input_model.data.shape[1]
-
-    # Create output as a copy of the input science data model
-    output = input_model.copy()
+    sci_ngroups = output.data.shape[1]

-    # Update the step status, and if ngroups > 3, set all of the GROUPDQ in
+    # Update the step status, and if ngroups > 3, set all GROUPDQ in
     # the first group to 'DO_NOT_USE'
     if sci_ngroups > 3:
         output.groupdq[:, 0, :, :] = \

diff --git a/jwst/firstframe/tests/test_firstframe.py b/jwst/firstframe/tests/test_firstframe.py
index e99c76da56..7d48d5bfce 100644
--- a/jwst/firstframe/tests/test_firstframe.py
+++ b/jwst/firstframe/tests/test_firstframe.py
@@ -25,8 +25,9 @@ def test_firstframe_set_groupdq():
     # create a JWST datamodel for MIRI data
     dm_ramp = RampModel(data=data, groupdq=groupdq)

-    # run the first frame correction step
-    dm_ramp_firstframe = do_correction(dm_ramp)
+    # run the first frame correction step on a copy (the decision to make the copy or
+    # not would have happened at _step.py)
+    dm_ramp_firstframe = do_correction(dm_ramp.copy())

     # check that the difference in the groupdq flags is equal to
     # the 'do_not_use' flag

diff --git a/jwst/gain_scale/gain_scale.py b/jwst/gain_scale/gain_scale.py
index 78ce9a2701..2de676ce7d 100644
--- a/jwst/gain_scale/gain_scale.py
+++ b/jwst/gain_scale/gain_scale.py
@@ -5,7 +5,7 @@
 log.setLevel(logging.DEBUG)


-def do_correction(input_model, gain_factor):
+def do_correction(output_model, gain_factor):
     """
     Short Summary
     -------------

     Parameters
     ----------
-    input_model : `~jwst.datamodels.JwstDataModel`
+    output_model : `~jwst.datamodels.JwstDataModel`
         Input datamodel to be corrected

     Returns
     -------
     """

-    # Create output as a copy of the input science data model
-    output_model = input_model.copy()
-
     # Apply the gain factor to the SCI and ERR arrays
     log.info('Rescaling by {0}'.format(gain_factor))
     output_model.data *= gain_factor

diff --git a/jwst/gain_scale/gain_scale_step.py b/jwst/gain_scale/gain_scale_step.py
index 82b06ddcf5..97d66b6ab7 100755
--- a/jwst/gain_scale/gain_scale_step.py
+++ b/jwst/gain_scale/gain_scale_step.py
@@ -1,9 +1,7 @@
 from stdatamodels.jwst import datamodels
-
 from ..stpipe import Step
 from . import gain_scale

-
 __all__ = ["GainScaleStep"]

@@ -16,12 +14,14 @@ class GainScaleStep(Step):

     class_alias = "gain_scale"

+    spec = """
+    """
     reference_file_types = ['gain']

-    def process(self, input):
+    def process(self, step_input):

         # Open the input data model
-        with datamodels.open(input) as input_model:
+        with datamodels.open(step_input) as input_model:

             # Is the gain_factor already populated in the input model?
             if input_model.meta.exposure.gain_factor is None:
@@ -35,16 +35,19 @@ def process(self, step_input):
                     self.log.info('GAINFACT not found in gain reference file')
                     self.log.info('Step will be skipped')
                     input_model.meta.cal_step.gain_scale = 'SKIPPED'
-                    gain_model.close()
+                    del gain_model
                     return input_model
                 else:
                     gain_factor = gain_model.meta.exposure.gain_factor
-                    gain_model.close()
+                    del gain_model

             else:
                 gain_factor = input_model.meta.exposure.gain_factor

+            # Work on a copy
+            result = input_model.copy()
+
             # Do the scaling
-            result = gain_scale.do_correction(input_model, gain_factor)
+            result = gain_scale.do_correction(result, gain_factor)

         return result

diff --git a/jwst/gain_scale/tests/test_gain_scale.py b/jwst/gain_scale/tests/test_gain_scale.py
index bea0d5f651..ee507b0a4f 100644
--- a/jwst/gain_scale/tests/test_gain_scale.py
+++ b/jwst/gain_scale/tests/test_gain_scale.py
@@ -15,7 +15,8 @@ def test_correction(make_cubemodel):
     """
     datmod = make_cubemodel(2, 50, 50)
     gf = datmod.meta.exposure.gain_factor
-    output = do_correction(datmod, gain_factor=gf)
+    # run on a copy (the decision to make the copy or not would have happened at _step.py)
+    output = do_correction(datmod.copy(), gain_factor=gf)

     assert output.meta.cal_step.gain_scale == 'COMPLETE'
     assert np.all(output.data == datmod.data * gf)

diff --git a/jwst/group_scale/group_scale.py b/jwst/group_scale/group_scale.py
index 811cb15f1a..354e8ad76c 100644
--- a/jwst/group_scale/group_scale.py
+++ b/jwst/group_scale/group_scale.py
@@ -40,6 +40,8 @@ def do_correction(model):

     # Apply the rescaling to the entire data array
     scale = float(frame_divisor) / nframes
+    if model.data.dtype.kind != 'f':
+        model.data = model.data.astype(float)
     model.data *= scale

     model.meta.cal_step.group_scale = 'COMPLETE'

diff --git a/jwst/group_scale/group_scale_step.py b/jwst/group_scale/group_scale_step.py
index 3d3166697d..9e69e77a6b 100755
--- a/jwst/group_scale/group_scale_step.py
+++ b/jwst/group_scale/group_scale_step.py
@@ -1,5 +1,4 @@
 from stdatamodels.jwst import datamodels
-
 from ..stpipe import Step
 from . import group_scale
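+    # (Aside: the per-group correction below is described by this module as a
+    #  convolution of each group with the IPC kernel; for a spatially uniform
+    #  kernel the same operation could be sketched as
+    #  scipy.ndimage.convolve(group, kernel). Illustrative only, not a call
+    #  made by this module.)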
-    kernel = get_ipc_slice(input_model, ipc_model)
+    kernel = get_ipc_slice(output, ipc_model)

     log.debug("substrt1 = %d, subsize1 = %d, substrt2 = %d, subsize2 = %d" %
-              (input_model.meta.subarray.xstart, input_model.meta.subarray.xsize,
-               input_model.meta.subarray.ystart, input_model.meta.subarray.ysize))
+              (output.meta.subarray.xstart, output.meta.subarray.xsize,
+               output.meta.subarray.ystart, output.meta.subarray.ysize))
     log.debug('Number of reference pixels: bottom, top, left, right ='
               ' %d, %d, %d, %d' %
               (nref.bottom_rows, nref.top_rows,

     log.debug("Shape of ipc image = %s" % repr(ipc_model.data.shape))

     # Loop over all integrations and groups in input science data.
-    for i in range(input_model.data.shape[0]):      # integrations
-        for j in range(input_model.data.shape[1]):  # groups
+    for i in range(output.data.shape[0]):      # integrations
+        for j in range(output.data.shape[1]):  # groups
             # Convolve the current group in-place with the IPC kernel.
             if is_irs2_format:
                 # Extract normal data from input IRS2-format data.

diff --git a/jwst/ipc/ipc_step.py b/jwst/ipc/ipc_step.py
index 97a59f24d2..bb1fcf359f 100755
--- a/jwst/ipc/ipc_step.py
+++ b/jwst/ipc/ipc_step.py
@@ -14,9 +14,12 @@ class IPCStep(Step):

     class_alias = "ipc"

+    spec = """
+    """
+
     reference_file_types = ['ipc']

-    def process(self, input):
+    def process(self, step_input):
         """Apply the IPC correction.

         Parameters
         """

         # Open the input data model
-        with datamodels.RampModel(input) as input_model:
+        with datamodels.RampModel(step_input) as input_model:

             # Get the name of the ipc reference file to use
             self.ipc_name = self.get_reference_file(input_model, 'ipc')
@@ -41,18 +44,20 @@ def process(self, step_input):
             if self.ipc_name == 'N/A':
                 self.log.warning('No IPC reference file found')
                 self.log.warning('IPC step will be skipped')
-                result = input_model.copy()
-                result.meta.cal_step.ipc = 'SKIPPED'
-                return result
+                input_model.meta.cal_step.ipc = 'SKIPPED'
+                return input_model

             # Open the ipc reference file data model
             ipc_model = datamodels.IPCModel(self.ipc_name)

-            # Do the ipc correction
-            result = ipc_corr.do_correction(input_model, ipc_model)
+            # Work on a copy
+            result = input_model.copy()

-            # Close the reference file and update the step status
-            ipc_model.close()
+            # Do the ipc correction
+            result = ipc_corr.do_correction(result, ipc_model)
             result.meta.cal_step.ipc = 'COMPLETE'

+            # Cleanup
+            del ipc_model
+
         return result

diff --git a/jwst/jump/jump.py b/jwst/jump/jump.py
index 1658ce8ebd..92f70de2fb 100644
--- a/jwst/jump/jump.py
+++ b/jwst/jump/jump.py
@@ -9,7 +9,7 @@
 log.setLevel(logging.DEBUG)


-def run_detect_jumps(input_model, gain_model, readnoise_model,
+def run_detect_jumps(output_model, gain_model, readnoise_model,
                      rejection_thresh, three_grp_thresh, four_grp_thresh, max_cores,
                      max_jump_to_flag_neighbors, min_jump_to_flag_neighbors,
                      flag_4_neighbors,

     # Runs `detect_jumps` in stcal

     # extract data and info from input_model to pass to detect_jumps
-    frames_per_group = input_model.meta.exposure.nframes
-    data = input_model.data
-    gdq = input_model.groupdq
-    pdq = input_model.pixeldq
-    err = input_model.err
-    output_model = input_model.copy()
+    frames_per_group = output_model.meta.exposure.nframes

     # determine the number of groups that correspond to the after_jump times
     # needed because the group time is not passed to detect_jumps
-    gtime = input_model.meta.exposure.group_time
+    gtime = output_model.meta.exposure.group_time
     after_jump_flag_n1 = int(after_jump_flag_time1 // gtime)
     after_jump_flag_n2 = int(after_jump_flag_time2 // gtime)
     grps_masked_after_shower = int(time_masked_after_shower // gtime)
     snowball_grps_masked_next_int = int(snowball_time_masked_next_int // gtime)

     # Get 2D gain and read noise values from their respective models
-    if reffile_utils.ref_matches_sci(input_model, gain_model):
+    if reffile_utils.ref_matches_sci(output_model, gain_model):
         gain_2d = gain_model.data
     else:
         log.info('Extracting gain subarray to match science data')
-        gain_2d = reffile_utils.get_subarray_data(input_model, gain_model)
+        gain_2d = reffile_utils.get_subarray_data(output_model, gain_model)

-    if reffile_utils.ref_matches_sci(input_model, readnoise_model):
+    if reffile_utils.ref_matches_sci(output_model, readnoise_model):
         readnoise_2d = readnoise_model.data
     else:
         log.info('Extracting readnoise subarray to match science data')
-        readnoise_2d = reffile_utils.get_subarray_data(input_model,
+        readnoise_2d = reffile_utils.get_subarray_data(output_model,
                                                        readnoise_model)

     new_gdq, new_pdq, number_crs, number_extended_events, stddev\
-        = detect_jumps(frames_per_group, data, gdq, pdq, err,
+        = detect_jumps(frames_per_group, output_model.data, output_model.groupdq,
+                       output_model.pixeldq, output_model.err,
                        gain_2d, readnoise_2d,
                        rejection_thresh, three_grp_thresh,
                        four_grp_thresh, max_cores,

@@ -99,14 +95,15 @@ def run_detect_jumps(output_model, gain_model, readnoise_model,
     # determine the number of groups with all pixels set to DO_NOT_USE
     dnu_flag = 1
     num_flagged_grps = 0
-    for integ in range(data.shape[0]):
-        for grp in range(data.shape[1]):
-            if np.all(np.bitwise_and(gdq[integ, grp, :, :], dnu_flag)):
+    datashape = np.shape(output_model.data)
+    for integ in range(datashape[0]):
+        for grp in range(datashape[1]):
+            if np.all(np.bitwise_and(output_model.groupdq[integ, grp, :, :], dnu_flag)):
                 num_flagged_grps += 1
-    total_groups = data.shape[0] * data.shape[1] - num_flagged_grps - data.shape[0]
+    total_groups = datashape[0] * datashape[1] - num_flagged_grps - datashape[0]
     if total_groups >= 1:
         total_time = output_model.meta.exposure.group_time * total_groups
-        total_pixels = data.shape[2] * data.shape[3]
+        total_pixels = datashape[2] * datashape[3]
         output_model.meta.exposure.primary_cosmic_rays = 1000 * number_crs / (total_time * total_pixels)
         output_model.meta.exposure.extended_emission_events = 1e6 * number_extended_events /\
             (total_time * total_pixels)

diff --git a/jwst/jump/jump_step.py b/jwst/jump/jump_step.py
index 8670a44d31..86f2b33f23 100755
--- a/jwst/jump/jump_step.py
+++ b/jwst/jump/jump_step.py
@@ -55,18 +55,22 @@ class JumpStep(Step):

     class_alias = 'jump'

-    def process(self, input):
+    def process(self, step_input):
+
+        # Open the input data model
+        with datamodels.RampModel(step_input) as input_model:

-        with datamodels.RampModel(input) as input_model:
             tstart = time.time()

             # Check for an input model with NGROUPS<=2
             ngroups = input_model.data.shape[1]
             if ngroups <= 2:
                 self.log.warning('Cannot apply jump detection when NGROUPS<=2;')
                 self.log.warning('Jump step will be skipped')
-                result = input_model.copy()
-                result.meta.cal_step.jump = 'SKIPPED'
-                return result
+                input_model.meta.cal_step.jump = 'SKIPPED'
+                return input_model
+
+            # Work on a copy
+            result = input_model.copy()

             # Retrieve the parameter values
             rej_thresh = self.rejection_threshold
             self.log.info('Maximum cores to use = %s',
max_cores) # Get the gain and readnoise reference files - gain_filename = self.get_reference_file(input_model, 'gain') + gain_filename = self.get_reference_file(result, 'gain') self.log.info('Using GAIN reference file: %s', gain_filename) gain_model = datamodels.GainModel(gain_filename) - readnoise_filename = self.get_reference_file(input_model, + readnoise_filename = self.get_reference_file(result, 'readnoise') self.log.info('Using READNOISE reference file: %s', readnoise_filename) readnoise_model = datamodels.ReadnoiseModel(readnoise_filename) # Call the jump detection routine - result = run_detect_jumps(input_model, gain_model, readnoise_model, + result = run_detect_jumps(result, gain_model, readnoise_model, rej_thresh, three_grp_rej_thresh, four_grp_rej_thresh, max_cores, max_jump_to_flag_neighbors, min_jump_to_flag_neighbors, flag_4_neighbors, @@ -131,11 +135,13 @@ def process(self, input): ) - gain_model.close() - readnoise_model.close() tstop = time.time() self.log.info('The execution time in seconds: %f', tstop - tstart) - result.meta.cal_step.jump = 'COMPLETE' + result.meta.cal_step.jump = 'COMPLETE' + + # Cleanup + del gain_model + del readnoise_model return result diff --git a/jwst/lastframe/lastframe_step.py b/jwst/lastframe/lastframe_step.py index 27ff6cc172..9c79370396 100755 --- a/jwst/lastframe/lastframe_step.py +++ b/jwst/lastframe/lastframe_step.py @@ -1,5 +1,4 @@ from stdatamodels.jwst import datamodels - from ..stpipe import Step from . import lastframe_sub @@ -15,20 +14,27 @@ class LastFrameStep(Step): class_alias = "lastframe" - def process(self, input): + spec = """ + """ + + def process(self, step_input): # Open the input data model - with datamodels.RampModel(input) as input_model: + with datamodels.RampModel(step_input) as input_model: # check the data is MIRI data detector = input_model.meta.instrument.detector - if detector[:3] == 'MIR': - # Do the lastframe correction subtraction - result = lastframe_sub.do_correction(input_model) - else: + + if detector[:3] != 'MIR': self.log.warning('Last Frame Correction is only for MIRI data') self.log.warning('Last frame step will be skipped') - result = input_model.copy() - result.meta.cal_step.lastframe = 'SKIPPED' + input_model.meta.cal_step.lastframe = 'SKIPPED' + return input_model + + # Work on a copy + result = input_model.copy() + + # Do the lastframe correction subtraction + result = lastframe_sub.do_correction(result) return result diff --git a/jwst/lastframe/lastframe_sub.py b/jwst/lastframe/lastframe_sub.py index 4a6dacd2af..d507261bb1 100644 --- a/jwst/lastframe/lastframe_sub.py +++ b/jwst/lastframe/lastframe_sub.py @@ -10,7 +10,7 @@ log.setLevel(logging.DEBUG) -def do_correction(input_model): +def do_correction(output): """ Short Summary ------------- @@ -19,7 +19,7 @@ def do_correction(input_model): Parameters ---------- - input_model: data model object + output: data model object science data to be corrected Returns @@ -30,10 +30,7 @@ def do_correction(input_model): """ # Save some data params for easy use later - sci_ngroups = input_model.data.shape[1] - - # Create output as a copy of the input science data model - output = input_model.copy() + sci_ngroups = output.data.shape[1] # Update the step status, and if ngroups > 2, set all of the GROUPDQ in # the final group to 'DO_NOT_USE' diff --git a/jwst/lastframe/tests/test_lastframe.py b/jwst/lastframe/tests/test_lastframe.py index 590fc73885..29d0090ad4 100755 --- a/jwst/lastframe/tests/test_lastframe.py +++ b/jwst/lastframe/tests/test_lastframe.py @@ -25,8 
+25,9 @@ def test_lastframe_set_groupdq():
     # create a JWST datamodel for MIRI data
     dm_ramp = RampModel(data=data, groupdq=groupdq)
 
-    # run the last frame correction step
-    dm_ramp_lastframe = do_correction(dm_ramp)
+    # run the last frame correction step on a copy of the input datamodel (the decision
+    # to make the copy or not is handled in the _step.py code)
+    dm_ramp_lastframe = do_correction(dm_ramp.copy())
 
     # check that the difference in the groupdq flags is equal to
     # the 'do_not_use' flag
diff --git a/jwst/linearity/linearity.py b/jwst/linearity/linearity.py
index 21c3fc9813..b5af9ebf21 100644
--- a/jwst/linearity/linearity.py
+++ b/jwst/linearity/linearity.py
@@ -10,27 +10,25 @@
 log.setLevel(logging.DEBUG)
 
 
-def do_correction(input_model, lin_model):
+def do_correction(output_model, lin_model):
 
-    # Create the output model as a copy of the input
-    output_model = input_model.copy()
     zframe = None
     if output_model.meta.exposure.zero_frame:
         zframe = output_model.zeroframe
 
     # Get dq arrays
-    pdq = input_model.pixeldq
-    gdq = input_model.groupdq
+    pdq = output_model.pixeldq
+    gdq = output_model.groupdq
 
     # If the input data does not have an expanded DQ array, create one
-    if len(input_model.groupdq) == 0:
-        gdq = (input_model.data * 0).astype(np.uint32)
+    if len(output_model.groupdq) == 0:
+        gdq = (output_model.data * 0).astype(np.uint32)
 
     # Obtain linearity coefficients and dq array from reference file
-    if reffile_utils.ref_matches_sci(input_model, lin_model):
+    if reffile_utils.ref_matches_sci(output_model, lin_model):
         lin_coeffs = lin_model.coeffs
         lin_dq = lin_model.dq
     else:
-        sub_lin_model = reffile_utils.get_subarray_model(input_model, lin_model)
+        sub_lin_model = reffile_utils.get_subarray_model(output_model, lin_model)
         lin_coeffs = sub_lin_model.coeffs.copy()
         lin_dq = sub_lin_model.dq.copy()
         sub_lin_model.close()
diff --git a/jwst/linearity/linearity_step.py b/jwst/linearity/linearity_step.py
index 29b6c94b45..a4934a1ec6 100644
--- a/jwst/linearity/linearity_step.py
+++ b/jwst/linearity/linearity_step.py
@@ -14,12 +14,15 @@ class LinearityStep(Step):
 
     class_alias = "linearity"
 
+    spec = """
+    """
+
     reference_file_types = ['linearity']
 
-    def process(self, input):
+    def process(self, step_input):
 
         # Open the input data model
-        with datamodels.RampModel(input) as input_model:
+        with datamodels.RampModel(step_input) as input_model:
 
             # Get the name of the linearity reference file to use
             self.lin_name = self.get_reference_file(input_model, 'linearity')
@@ -29,18 +32,20 @@ def process(self, input):
             if self.lin_name == 'N/A':
                 self.log.warning('No Linearity reference file found')
                 self.log.warning('Linearity step will be skipped')
-                result = input_model.copy()
-                result.meta.cal_step.linearity = 'SKIPPED'
-                return result
+                input_model.meta.cal_step.linearity = 'SKIPPED'
+                return input_model
 
             # Open the linearity reference file data model
             lin_model = datamodels.LinearityModel(self.lin_name)
 
-            # Do the linearity correction
-            result = linearity.do_correction(input_model, lin_model)
+            # Work on a copy
+            result = input_model.copy()
 
-            # Close the reference file and update the step status
-            lin_model.close()
+            # Do the linearity correction
+            result = linearity.do_correction(result, lin_model)
             result.meta.cal_step.linearity = 'COMPLETE'
 
+            # Cleanup
+            del lin_model
+
             return result
diff --git a/jwst/persistence/persistence_step.py b/jwst/persistence/persistence_step.py
index a69090973b..daa006fb0a 100644
--- a/jwst/persistence/persistence_step.py
+++ b/jwst/persistence/persistence_step.py
@@ -19,85 +19,89 @@ class
PersistenceStep(Step): flag_pers_cutoff = float(default=40.) # Pixels with persistence correction >= this value in DN will be flagged in the DQ save_persistence = boolean(default=False) # Save subtracted persistence to an output file with suffix '_output_pers' save_trapsfilled = boolean(default=True) # Save updated trapsfilled file with suffix '_trapsfilled' + modify_input = boolean(default=False) """ reference_file_types = ["trapdensity", "trappars", "persat"] - def process(self, input): + def process(self, step_input): if self.input_trapsfilled is not None: if (self.input_trapsfilled == "None" or len(self.input_trapsfilled) == 0): self.input_trapsfilled = None - output_obj = datamodels.RampModel(input).copy() - - self.trap_density_filename = self.get_reference_file(output_obj, - "trapdensity") - self.trappars_filename = self.get_reference_file(output_obj, - "trappars") - self.persat_filename = self.get_reference_file(output_obj, "persat") - - # Is any reference file missing? - missing = False - missing_reftypes = [] - if self.persat_filename == "N/A": - missing = True - missing_reftypes.append("PERSAT") - if self.trap_density_filename == "N/A": - missing = True - missing_reftypes.append("TRAPDENSITY") - if self.trappars_filename == "N/A": - missing = True - missing_reftypes.append("TRAPPARS") - if missing: - if len(missing_reftypes) == 1: - msg = "Missing reference file type: " + missing_reftypes[0] + with datamodels.RampModel(step_input) as input_model: + + self.trap_density_filename = self.get_reference_file(input_model, + "trapdensity") + self.trappars_filename = self.get_reference_file(input_model, + "trappars") + self.persat_filename = self.get_reference_file(input_model, "persat") + + # Is any reference file missing? + missing = False + missing_reftypes = [] + if self.persat_filename == "N/A": + missing = True + missing_reftypes.append("PERSAT") + if self.trap_density_filename == "N/A": + missing = True + missing_reftypes.append("TRAPDENSITY") + if self.trappars_filename == "N/A": + missing = True + missing_reftypes.append("TRAPPARS") + if missing: + if len(missing_reftypes) == 1: + msg = "Missing reference file type: " + missing_reftypes[0] + else: + msg = "Missing reference file types: " + for name in missing_reftypes: + msg += (" " + name) + self.log.warning("%s", msg) + input_model.meta.cal_step.persistence = "SKIPPED" + return input_model + + # Work on a copy + result = input_model.copy() + + if self.input_trapsfilled is None: + traps_filled_model = None else: - msg = "Missing reference file types: " - for name in missing_reftypes: - msg += (" " + name) - self.log.warning("%s", msg) - output_obj.meta.cal_step.persistence = "SKIPPED" - return output_obj - - if self.input_trapsfilled is None: - traps_filled_model = None - else: - traps_filled_model = datamodels.TrapsFilledModel( - self.input_trapsfilled) - trap_density_model = datamodels.TrapDensityModel( - self.trap_density_filename) - trappars_model = datamodels.TrapParsModel(self.trappars_filename) - persat_model = datamodels.PersistenceSatModel(self.persat_filename) - - pers_a = persistence.DataSet(output_obj, traps_filled_model, - self.flag_pers_cutoff, - self.save_persistence, - trap_density_model, trappars_model, - persat_model) - (output_obj, traps_filled, output_pers, skipped) = pers_a.do_all() - if skipped: - output_obj.meta.cal_step.persistence = 'SKIPPED' - else: - output_obj.meta.cal_step.persistence = 'COMPLETE' - - if traps_filled_model is not None: # input traps_filled - traps_filled_model.close() - if 
traps_filled is not None: # output traps_filled - # Save the traps_filled image with suffix 'trapsfilled'. - self.save_model( - traps_filled, suffix='trapsfilled', force=self.save_trapsfilled - ) - traps_filled.close() - - if output_pers is not None: # output file of persistence - self.save_model(output_pers, suffix='output_pers') - output_pers.close() - - # Close reference files. - trap_density_model.close() - trappars_model.close() - persat_model.close() - - return output_obj + traps_filled_model = datamodels.TrapsFilledModel( + self.input_trapsfilled) + trap_density_model = datamodels.TrapDensityModel( + self.trap_density_filename) + trappars_model = datamodels.TrapParsModel(self.trappars_filename) + persat_model = datamodels.PersistenceSatModel(self.persat_filename) + + pers_a = persistence.DataSet(result, traps_filled_model, + self.flag_pers_cutoff, + self.save_persistence, + trap_density_model, trappars_model, + persat_model) + (result, traps_filled, output_pers, skipped) = pers_a.do_all() + if skipped: + result.meta.cal_step.persistence = 'SKIPPED' + else: + result.meta.cal_step.persistence = 'COMPLETE' + + if traps_filled_model is not None: # input traps_filled + del traps_filled_model + if traps_filled is not None: # output traps_filled + # Save the traps_filled image with suffix 'trapsfilled'. + self.save_model( + traps_filled, suffix='trapsfilled', force=self.save_trapsfilled + ) + del traps_filled + + if output_pers is not None: # output file of persistence + self.save_model(output_pers, suffix='output_pers') + output_pers.close() + + # Cleanup + del trap_density_model + del trappars_model + del persat_model + + return result diff --git a/jwst/pipeline/calwebb_detector1.py b/jwst/pipeline/calwebb_detector1.py index 78699ff12f..afc3bc5ab3 100644 --- a/jwst/pipeline/calwebb_detector1.py +++ b/jwst/pipeline/calwebb_detector1.py @@ -176,4 +176,4 @@ def setup_output(self, input): if input.meta.cal_step.ramp_fit == 'COMPLETE': self.suffix = 'rate' else: - self.suffix = 'ramp' + self.suffix = 'ramp' \ No newline at end of file diff --git a/jwst/ramp_fitting/ramp_fit_step.py b/jwst/ramp_fitting/ramp_fit_step.py index 12c34b29df..a374e53663 100644 --- a/jwst/ramp_fitting/ramp_fit_step.py +++ b/jwst/ramp_fitting/ramp_fit_step.py @@ -1,5 +1,4 @@ #! 
/usr/bin/env python
-
 import numpy as np
 
 from stcal.ramp_fitting import ramp_fit
@@ -18,7 +17,6 @@
 from ..lib import reffile_utils
 
 import logging
-import copy
 import warnings
 
 log = logging.getLogger(__name__)
@@ -65,7 +63,7 @@ def get_reference_file_subarrays(model, readnoise_model, gain_model, nframes):
         gain_2d = reffile_utils.get_subarray_data(model, gain_model)
 
     if reffile_utils.ref_matches_sci(model, readnoise_model):
-        readnoise_2d = readnoise_model.data.copy()
+        readnoise_2d = readnoise_model.data
     else:
         log.info('Extracting readnoise subarray to match science data')
         readnoise_2d = reffile_utils.get_subarray_data(model, readnoise_model)
@@ -409,12 +407,17 @@ class RampFitStep(Step):
 
     reference_file_types = ['readnoise', 'gain']
 
-    def process(self, input):
+    def process(self, step_input):
+
+        # Open the input data model
+        with datamodels.RampModel(step_input) as input_model:
+
+            # Work on a copy
+            result = input_model.copy()
 
-        with datamodels.RampModel(input) as input_model:
             max_cores = self.maximum_cores
-            readnoise_filename = self.get_reference_file(input_model, 'readnoise')
-            gain_filename = self.get_reference_file(input_model, 'gain')
+            readnoise_filename = self.get_reference_file(result, 'readnoise')
+            gain_filename = self.get_reference_file(result, 'gain')
 
             ngroups = input_model.data.shape[1]
             if self.algorithm.upper() == "LIKELY" and ngroups < LIKELY_MIN_NGROUPS:
@@ -435,12 +438,12 @@ def process(self, input):
             # available later in the gain_scale step, which avoids having to
             # load the gain ref file again in that step.
             if gain_model.meta.exposure.gain_factor is not None:
-                input_model.meta.exposure.gain_factor = gain_model.meta.exposure.gain_factor
+                result.meta.exposure.gain_factor = gain_model.meta.exposure.gain_factor
 
             # Get gain arrays, subarrays if desired.
-            frames_per_group = input_model.meta.exposure.nframes
+            frames_per_group = result.meta.exposure.nframes
             readnoise_2d, gain_2d = get_reference_file_subarrays(
-                input_model, readnoise_model, gain_model, frames_per_group)
+                result, readnoise_model, gain_model, frames_per_group)
 
             log.info(f"Using algorithm = {self.algorithm}")
             log.info(f"Using weighting = {self.weighting}")
@@ -449,16 +452,17 @@ def process(self, input):
             if self.algorithm == "GLS":
                 buffsize //= 10
 
-            int_times = input_model.int_times
+            int_times = result.int_times
 
             # Before the ramp_fit() call, copy the input model ("_W" for weighting)
             # for later reconstruction of the fitting array tuples.
-            input_model_W = copy.copy(input_model)
+            input_model_W = result.copy()
+
             # Run ramp_fit(), ignoring all DO_NOT_USE groups, and return the
             # ramp fitting arrays for the ImageModel, the CubeModel, and the
             # RampFitOutputModel.
             image_info, integ_info, opt_info, gls_opt_model = ramp_fit.ramp_fit(
-                input_model, buffsize, self.save_opt, readnoise_2d, gain_2d,
+                result, buffsize, self.save_opt, readnoise_2d, gain_2d,
                 self.algorithm, self.weighting, max_cores, dqflags.pixel,
                 suppress_one_group=self.suppress_one_group)
 
@@ -478,7 +482,7 @@ def process(self, input):
                 gdq[where_sat] = np.bitwise_or(gdq[where_sat], dqflags.group['DO_NOT_USE'])
 
             # Get group_time for readnoise variance calculation
-            group_time = input_model.meta.exposure.group_time
+            group_time = result.meta.exposure.group_time
 
             # Using the modified GROUPDQ array, create new readnoise variance arrays
             image_var_RN, integ_var_RN, opt_var_RN = \
@@ -511,7 +515,7 @@ def process(self, input):
 
             # Save the OLS optional fit product, if it exists.
if opt_info is not None: - opt_model = create_optional_results_model(input_model, opt_info) + opt_model = create_optional_results_model(result, opt_info) self.save_model(opt_model, 'fitopt', output_file=self.opt_name) ''' # GLS removed from code, since it's not implemented right now. @@ -525,19 +529,22 @@ def process(self, input): out_model, int_model = None, None # Create models from possibly updated info if image_info is not None and integ_info is not None: - out_model = create_image_model(input_model, image_info) + out_model = create_image_model(result, image_info) out_model.meta.bunit_data = 'DN/s' out_model.meta.bunit_err = 'DN/s' out_model.meta.cal_step.ramp_fit = 'COMPLETE' - if ((input_model.meta.exposure.type in ['NRS_IFU', 'MIR_MRS']) or - (input_model.meta.exposure.type in ['NRS_AUTOWAVE', 'NRS_LAMP'] and - input_model.meta.instrument.lamp_mode == 'IFU')): + if ((result.meta.exposure.type in ['NRS_IFU', 'MIR_MRS']) or + (result.meta.exposure.type in ['NRS_AUTOWAVE', 'NRS_LAMP'] and + result.meta.instrument.lamp_mode == 'IFU')): out_model = datamodels.IFUImageModel(out_model) - int_model = create_integration_model(input_model, integ_info, int_times) + int_model = create_integration_model(result, integ_info, int_times) int_model.meta.bunit_data = 'DN/s' int_model.meta.bunit_err = 'DN/s' int_model.meta.cal_step.ramp_fit = 'COMPLETE' + # Cleanup + del result + return out_model, int_model diff --git a/jwst/refpix/irs2_subtract_reference.py b/jwst/refpix/irs2_subtract_reference.py index 7277a3cfce..b05261538e 100644 --- a/jwst/refpix/irs2_subtract_reference.py +++ b/jwst/refpix/irs2_subtract_reference.py @@ -10,13 +10,13 @@ log.setLevel(logging.DEBUG) -def correct_model(input_model, irs2_model, scipix_n_default=16, refpix_r_default=4, +def correct_model(output_model, irs2_model, scipix_n_default=16, refpix_r_default=4, pad=8, preserve_refpix=False): """Correct an input NIRSpec IRS2 datamodel using reference pixels. Parameters ---------- - input_model: ramp model + output_model: ramp model The input science data model. irs2_model: IRS2 model @@ -76,9 +76,9 @@ def correct_model(input_model, irs2_model, scipix_n_default=16, refpix_r_default # Copy in SCI and PIXELDQ arrays for now; that's all we need. The rest # of the input model will be copied to output at the end of the step. - data = input_model.data.copy() - pixeldq = input_model.pixeldq.copy() - input_model.meta.cal_step.refpix = 'not specified yet' + data = output_model.data.copy() + pixeldq = output_model.pixeldq.copy() + output_model.meta.cal_step.refpix = 'not specified yet' # Load the reference file data. 
# The reference file data are complex, but they're stored as float, with @@ -90,8 +90,8 @@ def correct_model(input_model, irs2_model, scipix_n_default=16, refpix_r_default if nrows != expected_nrows: log.error("Number of rows in reference file = {}," " but it should be {}.".format(nrows, expected_nrows)) - input_model.meta.cal_step.refpix = 'SKIPPED' - return input_model + output_model.meta.cal_step.refpix = 'SKIPPED' + return output_model alpha = np.ones((4, nrows // 2), dtype=np.complex64) beta = np.zeros((4, nrows // 2), dtype=np.complex64) @@ -105,20 +105,20 @@ def correct_model(input_model, irs2_model, scipix_n_default=16, refpix_r_default beta[2, :] = float_to_complex(irs2_model.irs2_table.field("beta_2")) beta[3, :] = float_to_complex(irs2_model.irs2_table.field("beta_3")) - scipix_n = input_model.meta.exposure.nrs_normal + scipix_n = output_model.meta.exposure.nrs_normal if scipix_n is None: log.warning("Keyword NRS_NORM not found; using default value %d" % scipix_n_default) scipix_n = scipix_n_default - refpix_r = input_model.meta.exposure.nrs_reference + refpix_r = output_model.meta.exposure.nrs_reference if refpix_r is None: log.warning("Keyword NRS_REF not found; using default value %d" % refpix_r_default) refpix_r = refpix_r_default # Convert from sky (DMS) orientation to detector orientation. - detector = input_model.meta.instrument.detector + detector = output_model.meta.instrument.detector if detector == "NRS1": data = np.swapaxes(data, 2, 3) pixeldq = np.swapaxes(pixeldq, 0, 1) @@ -176,7 +176,6 @@ def correct_model(input_model, irs2_model, scipix_n_default=16, refpix_r_default data[integ, :, :, :] = data0 # Convert corrected data back to sky orientation - output_model = input_model.copy() if not preserve_refpix: temp_data = data[:, :, :, nx - ny:] else: diff --git a/jwst/refpix/refpix_step.py b/jwst/refpix/refpix_step.py index 1e1df78328..e7000ba610 100644 --- a/jwst/refpix/refpix_step.py +++ b/jwst/refpix/refpix_step.py @@ -29,17 +29,19 @@ class RefPixStep(Step): reference_file_types = ['refpix'] - def process(self, input): + def process(self, step_input): - # Load the input science data - with datamodels.RampModel(input) as input_model: + # Open the input data model + with datamodels.RampModel(step_input) as input_model: - if pipe_utils.is_irs2(input_model): + # Work on a copy + result = input_model.copy() + + if pipe_utils.is_irs2(result): # Flag bad reference pixels first - datamodel = input_model.copy() irs2_subtract_reference.flag_bad_refpix( - datamodel, n_sigma=self.ovr_corr_mitigation_ftr, flag_only=True) + result, n_sigma=self.ovr_corr_mitigation_ftr, flag_only=True) # If desired, do the normal refpix correction before IRS2, without # side pixel handling @@ -48,39 +50,38 @@ def process(self, input): self.log.info('Turning off side pixel correction for IRS2') self.use_side_ref_pixels = False reference_pixels.correct_model( - datamodel, self.odd_even_columns, self.use_side_ref_pixels, + result, self.odd_even_columns, self.use_side_ref_pixels, self.side_smoothing_length, self.side_gain, self.odd_even_rows) # Now that values are updated, replace bad reference pixels - irs2_subtract_reference.flag_bad_refpix(datamodel, replace_only=True) + irs2_subtract_reference.flag_bad_refpix(result, replace_only=True) # Get the necessary refpix reference file for IRS2 correction - self.irs2_name = self.get_reference_file(datamodel, 'refpix') + self.irs2_name = self.get_reference_file(result, 'refpix') self.log.info(f'Using refpix reference file: {self.irs2_name}') # Check for a 
valid reference file if self.irs2_name == 'N/A': self.log.warning('No refpix reference file found') self.log.warning('RefPix step will be skipped') - datamodel.meta.cal_step.refpix = 'SKIPPED' - return datamodel + result.meta.cal_step.refpix = 'SKIPPED' + return result # Load the reference file into a datamodel irs2_model = datamodels.IRS2Model(self.irs2_name) # Apply the IRS2 correction scheme result = irs2_subtract_reference.correct_model( - datamodel, irs2_model, preserve_refpix=self.preserve_irs2_refpix) + result, irs2_model, preserve_refpix=self.preserve_irs2_refpix) if result.meta.cal_step.refpix != 'SKIPPED': result.meta.cal_step.refpix = 'COMPLETE' - irs2_model.close() + del irs2_model return result else: # Not an NRS IRS2 exposure. Do the normal refpix correction. - datamodel = input_model.copy() - status = reference_pixels.correct_model(datamodel, + status = reference_pixels.correct_model(result, self.odd_even_columns, self.use_side_ref_pixels, self.side_smoothing_length, @@ -88,13 +89,14 @@ def process(self, input): self.odd_even_rows) if status == reference_pixels.REFPIX_OK: - datamodel.meta.cal_step.refpix = 'COMPLETE' + result.meta.cal_step.refpix = 'COMPLETE' elif status == reference_pixels.SUBARRAY_DOESNTFIT: self.log.warning("Subarray doesn't fit in full-sized array") - datamodel.meta.cal_step.refpix = 'SKIPPED' + result.meta.cal_step.refpix = 'SKIPPED' elif status == reference_pixels.BAD_REFERENCE_PIXELS: self.log.warning("No valid reference pixels, refpix step skipped") - datamodel.meta.cal_step.refpix = 'SKIPPED' + result.meta.cal_step.refpix = 'SKIPPED' elif status == reference_pixels.SUBARRAY_SKIPPED: - datamodel.meta.cal_step.refpix = 'SKIPPED' - return datamodel + result.meta.cal_step.refpix = 'SKIPPED' + + return result diff --git a/jwst/regtest/test_miri_image.py b/jwst/regtest/test_miri_image.py index 9d204b89ee..2d6f46061f 100644 --- a/jwst/regtest/test_miri_image.py +++ b/jwst/regtest/test_miri_image.py @@ -2,6 +2,8 @@ from astropy.io.fits.diff import FITSDiff from numpy.testing import assert_allclose from gwcs.wcstools import grid_from_bounding_box +import tracemalloc +import numpy as np from stdatamodels.jwst import datamodels @@ -113,6 +115,39 @@ def test_miri_image_detector1(run_detector1, rtdata_module, fitsdiff_default_kwa _assert_is_same(rtdata_module, fitsdiff_default_kwargs, suffix) +@pytest.mark.bigdata +def test_detector1_mem_usage(rtdata_module): + """Determine the memory usage for Detector 1""" + rtdata = rtdata_module + rtdata.get_data("miri/image/jw01024001001_04101_00001_mirimage_uncal.fits") + args = ["jwst.pipeline.Detector1Pipeline", rtdata.input] + + # starting the monitoring + tracemalloc.start() + + # run Detector1 + Step.from_cmdline(args) + + # displaying the memory + current_mem, peak_mem = tracemalloc.get_traced_memory() + # convert bytes to GB + peak_mem *= 1e-9 + peak_mem = np.round(peak_mem, decimals=1) + + # stopping the monitoring + tracemalloc.stop() + + # set comparison values in GB + mem_threshold = 16.0 # average user's available memory + mem_benchmark = 11.0 # benchmark run with build 1.15.1 + 1 additional GB + + # test that max memory is less than threshold + assert peak_mem < mem_threshold, "Max memory used is greater than 16 GB!" 
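+
+    # Note: tracemalloc only tracks allocations made through Python's memory
+    # allocator, so peak_mem is a lower bound on total process memory.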
+ + # test that max memory is less or equal to benchmark + assert peak_mem <= mem_benchmark, "Max memory used is greater than 11 GB" + + @pytest.mark.bigdata @pytest.mark.parametrize("suffix", ["dark_current", "ramp", "rate", "rateints"]) diff --git a/jwst/reset/reset_step.py b/jwst/reset/reset_step.py index 12457482aa..4ad03e7d52 100755 --- a/jwst/reset/reset_step.py +++ b/jwst/reset/reset_step.py @@ -14,43 +14,46 @@ class ResetStep(Step): class_alias = "reset" + spec = """ + """ + reference_file_types = ['reset'] - def process(self, input): + def process(self, step_input): # Open the input data model - with datamodels.open(input) as input_model: + with datamodels.open(step_input) as input_model: # check the data is MIRI data detector = input_model.meta.instrument.detector - if detector.startswith('MIR'): + if not detector.startswith('MIR'): + self.log.warning('Reset Correction is only for MIRI data') + self.log.warning('Reset step will be skipped') + input_model.meta.cal_step.reset = 'SKIPPED' + return input_model - # Get the name of the reset reference file to use - self.reset_name = self.get_reference_file(input_model, 'reset') - self.log.info('Using RESET reference file %s', self.reset_name) + # Get the name of the reset reference file to use + self.reset_name = self.get_reference_file(input_model, 'reset') + self.log.info('Using RESET reference file %s', self.reset_name) - # Check for a valid reference file - if self.reset_name == 'N/A': - self.log.warning('No RESET reference file found') - self.log.warning('Reset step will be skipped') - result = input_model.copy() - result.meta.cal_step.reset = 'SKIPPED' - return result + # Check for a valid reference file + if self.reset_name == 'N/A': + self.log.warning('No RESET reference file found') + self.log.warning('Reset step will be skipped') + input_model.meta.cal_step.reset = 'SKIPPED' + return input_model - # Open the reset ref file data model - reset_model = datamodels.ResetModel(self.reset_name) + # Open the reset ref file data model + reset_model = datamodels.ResetModel(self.reset_name) - # Do the reset correction subtraction - result = reset_sub.do_correction(input_model, reset_model) + # Work on a copy + result = input_model.copy() - # Close the reference file and update the step status - reset_model.close() - result.meta.cal_step.reset = 'COMPLETE' + # Do the reset correction subtraction + result = reset_sub.do_correction(result, reset_model) + result.meta.cal_step.reset = 'COMPLETE' - else: - self.log.warning('Reset Correction is only for MIRI data') - self.log.warning('Reset step will be skipped') - result = input_model.copy() - result.meta.cal_step.reset = 'SKIPPED' + # Cleanup + del reset_model return result diff --git a/jwst/reset/reset_sub.py b/jwst/reset/reset_sub.py index cc2ffae834..4dba18c5d3 100644 --- a/jwst/reset/reset_sub.py +++ b/jwst/reset/reset_sub.py @@ -8,7 +8,7 @@ log.setLevel(logging.DEBUG) -def do_correction(input_model, reset_model): +def do_correction(output_model, reset_model): """ Short Summary ------------- @@ -21,7 +21,7 @@ def do_correction(input_model, reset_model): Parameters ---------- - input_model: data model object + output_model: data model object science data to be corrected reset_model: reset model object @@ -36,10 +36,10 @@ def do_correction(input_model, reset_model): """ # Save some data params for easy use later - sci_nints = input_model.meta.exposure.nints # num ints in input data - sci_ngroups = input_model.meta.exposure.ngroups # num groups in input data - sci_integration_start = 
input_model.meta.exposure.integration_start
-    sci_integration_end = input_model.meta.exposure.integration_end
+    sci_nints = output_model.meta.exposure.nints  # num ints in input data
+    sci_ngroups = output_model.meta.exposure.ngroups  # num groups in input data
+    sci_integration_start = output_model.meta.exposure.integration_start
+    sci_integration_end = output_model.meta.exposure.integration_end
 
     istart = 0
     iend = sci_nints
@@ -55,9 +55,6 @@ def do_correction(input_model, reset_model):
     reset_model.data[np.isnan(reset_model.data)] = 0.0
     log.debug("Reset Sub using: nints = {}, ngroups = {}".format(sci_nints, sci_ngroups))
 
-    # Create output as a copy of the input science data model
-    output = input_model.copy()
-
     # find out how many groups we are correcting
     # the maximum number of groups to correct is reset_ngroups
     igroup = sci_ngroups
@@ -72,15 +69,15 @@ def do_correction(input_model, reset_model):
         ir = reset_nints - 1
 
         # combine the science and reset DQ arrays
-        output.pixeldq = np.bitwise_or(input_model.pixeldq, reset_model.dq)
+        output_model.pixeldq = np.bitwise_or(output_model.pixeldq, reset_model.dq)
 
         # we are only correcting the first reset_ngroups
         for j in range(igroup):
-            output.data[i - istart, j] -= reset_model.data[ir, j]
+            output_model.data[i - istart, j] -= reset_model.data[ir, j]
 
             # combine the ERR arrays in quadrature
             # NOTE: currently stubbed out until ERR handling is decided
             # output.err[i,j] = np.sqrt(
             #     output.err[i,j]**2 + reset.err[j]**2)
 
-    return output
+    return output_model
diff --git a/jwst/reset/tests/test_reset_sub.py b/jwst/reset/tests/test_reset_sub.py
index 74d2e6e86e..212c6fba65 100644
--- a/jwst/reset/tests/test_reset_sub.py
+++ b/jwst/reset/tests/test_reset_sub.py
@@ -154,8 +154,9 @@ def test_2_int(make_rampmodel, make_resetmodel):
         reset.data[0, i] = i * 0.1
         reset.data[1, i] = i * 0.2
 
-    # run correction
-    outfile = resetcorr(dm_ramp, reset)
+    # run correction on a copy of the input datamodel (the decision to make the copy
+    # or not is handled in the _step.py code)
+    outfile = resetcorr(dm_ramp.copy(), reset)
 
     # check that the reset file is subtracted frame by frame from the science data
     diff = dm_ramp.data[0] - reset.data[0, :ngroups]
diff --git a/jwst/rscd/rscd_step.py b/jwst/rscd/rscd_step.py
index 43eb9ddd66..98dcf3a02e 100755
--- a/jwst/rscd/rscd_step.py
+++ b/jwst/rscd/rscd_step.py
@@ -27,39 +27,40 @@ class RscdStep(Step):
 
     reference_file_types = ['rscd']
 
-    def process(self, input):
+    def process(self, step_input):
 
         # Open the input data model
-        with datamodels.RampModel(input) as input_model:
+        with datamodels.RampModel(step_input) as input_model:
 
             # check the data is MIRI data
             detector = input_model.meta.instrument.detector
-            if detector.startswith('MIR'):
+            if not detector.startswith('MIR'):
+                self.log.warning('RSCD correction is only for MIRI data')
+                self.log.warning('RSCD step will be skipped')
+                input_model.meta.cal_step.rscd = 'SKIPPED'
+                return input_model
 
-                # Get the name of the rscd reference file to use
-                self.rscd_name = self.get_reference_file(input_model, 'rscd')
-                self.log.info('Using RSCD reference file %s', self.rscd_name)
+            # Get the name of the rscd reference file to use
+            self.rscd_name = self.get_reference_file(input_model, 'rscd')
+            self.log.info('Using RSCD reference file %s', self.rscd_name)
 
-                # Check for a
valid reference file + if self.rscd_name == 'N/A': + self.log.warning('No RSCD reference file found') + self.log.warning('RSCD step will be skipped') + input_model.meta.cal_step.rscd = 'SKIPPED' + return input_model - # Load the rscd ref file data model - rscd_model = datamodels.RSCDModel(self.rscd_name) + # Load the rscd ref file data model + rscd_model = datamodels.RSCDModel(self.rscd_name) - # Do the rscd correction - result = rscd_sub.do_correction(input_model, rscd_model, self.type) + # Work on a copy + result = input_model.copy() - # Close the reference file - rscd_model.close() + # Do the rscd correction + result = rscd_sub.do_correction(result, rscd_model, self.type) - else: - self.log.warning('RSCD correction is only for MIRI data') - self.log.warning('RSCD step will be skipped') - result = input_model.copy() - result.meta.cal_step.rscd = 'SKIPPED' + # Cleanup + del rscd_model return result diff --git a/jwst/rscd/rscd_sub.py b/jwst/rscd/rscd_sub.py index 47ecee38f1..305c55f320 100644 --- a/jwst/rscd/rscd_sub.py +++ b/jwst/rscd/rscd_sub.py @@ -11,7 +11,7 @@ log.setLevel(logging.DEBUG) -def do_correction(input_model, rscd_model, type): +def do_correction(output_model, rscd_model, type): """ Short Summary ------------- @@ -21,7 +21,7 @@ def do_correction(input_model, rscd_model, type): Parameters ---------- - input_model: ~jwst.datamodels.RampModel + output_model: ~jwst.datamodels.RampModel science data to be corrected rscd_model: ~jwst.datamodels.RSCDModel @@ -38,28 +38,28 @@ def do_correction(input_model, rscd_model, type): """ # Retrieve the reference parameters for this exposure type - param = get_rscd_parameters(input_model, rscd_model) + param = get_rscd_parameters(output_model, rscd_model) if not bool(param): # empty dictionary log.warning('READPATT, SUBARRAY combination not found in ref file: RSCD correction will be skipped') - input_model.meta.cal_step.rscd = 'SKIPPED' - return input_model + output_model.meta.cal_step.rscd = 'SKIPPED' + return output_model if type == 'baseline': group_skip = param['skip'] - output = correction_skip_groups(input_model, group_skip) + output_model = correction_skip_groups(output_model, group_skip) else: # enhanced algorithm is not enabled yet (updated code and validation needed) log.warning('Enhanced algorithm not support yet: RSCD correction will be skipped') - input_model.meta.cal_step.rscd = 'SKIPPED' - return input_model + output_model.meta.cal_step.rscd = 'SKIPPED' + return output_model # decay function algorithm update needed - # output = correction_decay_function(input_model, param) + # output_model = correction_decay_function(input_model, param) - return output + return output_model -def correction_skip_groups(input_model, group_skip): +def correction_skip_groups(output, group_skip): """ Short Summary ------------- @@ -68,7 +68,7 @@ def correction_skip_groups(input_model, group_skip): Parameters ---------- - input_model: ~jwst.datamodels.RampModel + output: ~jwst.datamodels.RampModel science data to be corrected group_skip: int @@ -81,15 +81,12 @@ def correction_skip_groups(input_model, group_skip): """ # Save some data params for easy use later - sci_nints = input_model.data.shape[0] # number of integrations - sci_ngroups = input_model.data.shape[1] # number of groups + sci_nints = output.data.shape[0] # number of integrations + sci_ngroups = output.data.shape[1] # number of groups log.debug("RSCD correction using: nints=%d, ngroups=%d" % (sci_nints, sci_ngroups)) - # Create output as a copy of the input science data model - output = 
input_model.copy()
-
     # If ngroups <= group_skip+3, skip the flagging
     # the +3 is to ensure there is a slope to be fit including the flagging for
     # the last frame correction
@@ -108,7 +105,7 @@
     return output
 
 
-def correction_decay_function(input_model, param):
+def correction_decay_function(output, param):
     """
     Short Summary
     -------------
@@ -133,7 +130,7 @@
 
     Parameters
     ----------
-    input_model: ~jwst.datamodels.RampModel
+    output: ~jwst.datamodels.RampModel
         science data to be corrected
 
     param: dict
@@ -147,15 +144,12 @@
     """
 
     # Save some data params for easy use later
-    sci_nints = input_model.data.shape[0]        # number of integrations
-    sci_ngroups = input_model.data.shape[1]      # number of groups
+    sci_nints = output.data.shape[0]        # number of integrations
+    sci_ngroups = output.data.shape[1]      # number of groups
 
     log.debug("RSCD correction using: nints=%d, ngroups=%d" %
               (sci_nints, sci_ngroups))
 
-    # Create output as a copy of the input science data model
-    output = input_model.copy()
-
     # Check for valid parameters
     if sci_ngroups < 2:
         log.warning('RSCD correction requires > 1 group per integration')
@@ -208,17 +202,15 @@
         log.info(' Working on integration %d', i + 1)
 
         sat, dn_last23, dn_lastfit = \
-            get_DNaccumulated_last_int(input_model, i, sci_ngroups)
+            get_DNaccumulated_last_int(output, i, sci_ngroups)
 
         lastframe_even = dn_last23[1::2, :]
         lastframe_odd = dn_last23[0::2, :]
 
-        correction_even = lastframe_even.copy() * 0.0
-        correction_odd = lastframe_odd.copy() * 0.0
         factor2_even = lastframe_even.copy() * 0.0
         factor2_odd = lastframe_odd.copy() * 0.0
-        a1_even = lastframe_even.copy() * 0.0
-        a1_odd = lastframe_odd.copy() * 0.0
+        # these will be created in the loop: correction_even, correction_odd,
+        # a1_even, and a1_odd
 
         counts2_even = lastframe_even - crossopt_even
         counts2_odd = lastframe_odd - crossopt_odd
@@ -434,13 +426,8 @@ def get_DNaccumulated_last_int(input_model, i, sci_ngroups):
     dn_lastframe_fit: extrapolated last frame using the fit to the entire ramp
     """
-    nrows = input_model.data.shape[2]
-    ncols = input_model.data.shape[3]
     dn_lastframe2 = input_model.data[i - 1][sci_ngroups - 2]
     dn_lastframe3 = input_model.data[i - 1][sci_ngroups - 3]
 
-    dn_lastframe23 = dn_lastframe2.copy() * 0.0
-    dn_lastframe_fit = dn_lastframe2.copy() * 0.0
-    saturated = np.full((nrows, ncols), False)
 
     diff = dn_lastframe2 - dn_lastframe3
     dn_lastframe23 = dn_lastframe2 + diff
diff --git a/jwst/rscd/tests/test_rscd.py b/jwst/rscd/tests/test_rscd.py
index 9ef0011b89..eddec0c1a8 100644
--- a/jwst/rscd/tests/test_rscd.py
+++ b/jwst/rscd/tests/test_rscd.py
@@ -28,8 +28,8 @@ def test_rscd_baseline_set_groupdq():
     # get the number of groups to flag
     nflag = 3
 
-    # run the RSCD baseline correction step
-    dm_ramp_rscd = correction_skip_groups(dm_ramp, nflag)
+    # run the RSCD baseline correction step on a copy (the copy is normally made in the step code)
+    dm_ramp_rscd = correction_skip_groups(dm_ramp.copy(), nflag)
 
     # check that the difference in the groupdq flags is equal to
     # the 'do_not_use' flag for the 2nd integration
@@ -81,14 +81,14 @@ def test_rscd_baseline_too_few_groups():
     data = np.full(csize, 1.0, dtype=np.float32)
     groupdq = np.zeros(csize, dtype=np.uint8)
 
-    # create a JWST datamodel for MIRI data
+    # create a JWST datamodel for MIRI data; a copy is passed to the correction below
     dm_ramp = RampModel(data=data,
groupdq=groupdq) # get the number of groups to flag nflag = 3 # run the RSCD baseline correction step - dm_ramp_rscd = correction_skip_groups(dm_ramp, nflag) + dm_ramp_rscd = correction_skip_groups(dm_ramp.copy(), nflag) # test that the groupdq flags are not changed for any integration dq_diff = (dm_ramp_rscd.groupdq[:, :, :, :] diff --git a/jwst/saturation/saturation.py b/jwst/saturation/saturation.py index f75abf0003..8918d42dd9 100644 --- a/jwst/saturation/saturation.py +++ b/jwst/saturation/saturation.py @@ -22,7 +22,7 @@ ATOD_LIMIT = 65535. # Hard DN limit of 16-bit A-to-D converter -def flag_saturation(input_model, ref_model, n_pix_grow_sat, use_readpatt): +def flag_saturation(output_model, ref_model, n_pix_grow_sat, use_readpatt): """ Short Summary ------------- @@ -30,7 +30,7 @@ def flag_saturation(input_model, ref_model, n_pix_grow_sat, use_readpatt): Parameters ---------- - input_model : `~jwst.datamodels.RampModel` + output_model : `~jwst.datamodels.RampModel` The input science data to be corrected ref_model : `~jwst.datamodels.SaturationModel` @@ -51,24 +51,22 @@ def flag_saturation(input_model, ref_model, n_pix_grow_sat, use_readpatt): the GROUPDQ array """ - data = input_model.data - ngroups = input_model.meta.exposure.ngroups - nframes = input_model.meta.exposure.nframes + ngroups = output_model.meta.exposure.ngroups + nframes = output_model.meta.exposure.nframes - # Create the output model as a copy of the input - output_model = input_model.copy() gdq = output_model.groupdq pdq = output_model.pixeldq + data = output_model.data - zframe = input_model.zeroframe if input_model.meta.exposure.zero_frame else None + zframe = output_model.zeroframe if output_model.meta.exposure.zero_frame else None # Extract subarray from saturation reference file, if necessary - if reffile_utils.ref_matches_sci(input_model, ref_model): + if reffile_utils.ref_matches_sci(output_model, ref_model): sat_thresh = ref_model.data sat_dq = ref_model.dq else: log.info('Extracting reference file subarray to match science data') - ref_sub_model = reffile_utils.get_subarray_model(input_model, ref_model) + ref_sub_model = reffile_utils.get_subarray_model(output_model, ref_model) sat_thresh = ref_sub_model.data.copy() sat_dq = ref_sub_model.dq.copy() ref_sub_model.close() @@ -96,7 +94,7 @@ def flag_saturation(input_model, ref_model, n_pix_grow_sat, use_readpatt): return output_model -def irs2_flag_saturation(input_model, ref_model, n_pix_grow_sat, use_readpatt): +def irs2_flag_saturation(output_model, ref_model, n_pix_grow_sat, use_readpatt): """ Short Summary ------------- @@ -107,7 +105,7 @@ def irs2_flag_saturation(input_model, ref_model, n_pix_grow_sat, use_readpatt): Parameters ---------- - input_model : `~jwst.datamodels.RampModel` + output_model : `~jwst.datamodels.RampModel` The input science data to be corrected ref_model : `~jwst.datamodels.SaturationModel` @@ -128,11 +126,14 @@ def irs2_flag_saturation(input_model, ref_model, n_pix_grow_sat, use_readpatt): the GROUPDQ array """ - data = input_model.data + # Create the output model as a copy of the input + groupdq = output_model.groupdq + + data = output_model.data nints = data.shape[0] ngroups = data.shape[1] - detector = input_model.meta.instrument.detector - nframes = input_model.meta.exposure.nframes + detector = output_model.meta.instrument.detector + nframes = output_model.meta.exposure.nframes if use_readpatt: read_pattern = [[x + 1 + groupstart * nframes for x in range(nframes)] for groupstart in range(ngroups)] @@ -140,20 +141,17 @@ def 
irs2_flag_saturation(input_model, ref_model, n_pix_grow_sat, use_readpatt): else: read_pattern=None - # create a mask of the appropriate size - irs2_mask = x_irs2.make_mask(input_model) - # Create the output model as a copy of the input - output_model = input_model.copy() - groupdq = output_model.groupdq + # create a mask of the appropriate size + irs2_mask = x_irs2.make_mask(output_model) # Extract subarray from saturation reference file, if necessary - if reffile_utils.ref_matches_sci(input_model, ref_model): + if reffile_utils.ref_matches_sci(output_model, ref_model): sat_thresh = ref_model.data sat_dq = ref_model.dq else: log.info('Extracting reference file subarray to match science data') - ref_sub_model = reffile_utils.get_subarray_model(input_model, ref_model) + ref_sub_model = reffile_utils.get_subarray_model(output_model, ref_model) sat_thresh = ref_sub_model.data.copy() sat_dq = ref_sub_model.dq.copy() ref_sub_model.close() @@ -171,7 +169,7 @@ def irs2_flag_saturation(input_model, ref_model, n_pix_grow_sat, use_readpatt): flagarray = np.zeros(data.shape[-2:], dtype=groupdq.dtype) flaglowarray = np.zeros(data.shape[-2:], dtype=groupdq.dtype) - if input_model.meta.exposure.zero_frame: + if output_model.meta.exposure.zero_frame: zflagarray = np.zeros(data.shape[-2:], dtype=groupdq.dtype) zflaglowarray = np.zeros(data.shape[-2:], dtype=groupdq.dtype) @@ -229,8 +227,8 @@ def irs2_flag_saturation(input_model, ref_model, n_pix_grow_sat, use_readpatt): # Process ZEROFRAME. Instead of setting a ZEROFRAME DQ array, data # in the ZEROFRAME that is flagged will be set to 0. - if input_model.meta.exposure.zero_frame: - zplane = input_model.zeroframe[ints, :, :] + if output_model.meta.exposure.zero_frame: + zplane = output_model.zeroframe[ints, :, :] zdq = np.zeros(groupdq.shape[-2:], dtype=groupdq.dtype) ztemp = x_irs2.from_irs2(zplane, irs2_mask, detector) diff --git a/jwst/saturation/saturation_step.py b/jwst/saturation/saturation_step.py index 849abadb5e..d4bb08e70b 100755 --- a/jwst/saturation/saturation_step.py +++ b/jwst/saturation/saturation_step.py @@ -23,10 +23,10 @@ class SaturationStep(Step): reference_file_types = ['saturation'] - def process(self, input): + def process(self, step_input): # Open the input data model - with datamodels.RampModel(input) as input_model: + with datamodels.open(step_input) as input_model: # Get the name of the saturation reference file self.ref_name = self.get_reference_file(input_model, 'saturation') @@ -36,21 +36,23 @@ def process(self, input): if self.ref_name == 'N/A': self.log.warning('No SATURATION reference file found') self.log.warning('Saturation step will be skipped') - result = input_model.copy() - result.meta.cal_step.saturation = 'SKIPPED' - return result + input_model.meta.cal_step.saturation = 'SKIPPED' + return input_model # Open the reference file data model ref_model = datamodels.SaturationModel(self.ref_name) + # Work on a copy + result = input_model.copy() + # Do the saturation check - if pipe_utils.is_irs2(input_model): - sat = saturation.irs2_flag_saturation(input_model, ref_model, self.n_pix_grow_sat, self.use_readpatt) + if pipe_utils.is_irs2(result): + result = saturation.irs2_flag_saturation(result, ref_model, self.n_pix_grow_sat, self.use_readpatt) else: - sat = saturation.flag_saturation(input_model, ref_model, self.n_pix_grow_sat, self.use_readpatt) + result = saturation.flag_saturation(result, ref_model, self.n_pix_grow_sat, self.use_readpatt) + result.meta.cal_step.saturation = 'COMPLETE' - # Close the reference file and 
update the step status
-        ref_model.close()
-        sat.meta.cal_step.saturation = 'COMPLETE'
+            # Cleanup
+            del ref_model
 
-        return sat
+            return result
diff --git a/jwst/stpipe/core.py b/jwst/stpipe/core.py
index 66103dfef3..02ae5eb92a 100644
--- a/jwst/stpipe/core.py
+++ b/jwst/stpipe/core.py
@@ -28,6 +28,7 @@ class JwstStep(Step):
     def _datamodels_open(cls, init, **kwargs):
         return datamodels.open(init, **kwargs)
 
+
     def load_as_level2_asn(self, obj):
         """Load object as an association
 
diff --git a/jwst/superbias/bias_sub.py b/jwst/superbias/bias_sub.py
index fb26d61beb..4937d93e71 100644
--- a/jwst/superbias/bias_sub.py
+++ b/jwst/superbias/bias_sub.py
@@ -47,7 +47,7 @@ def do_correction(input_model, bias_model):
     return output_model
 
 
-def subtract_bias(input, bias):
+def subtract_bias(output, bias):
     """
     Subtracts a superbias image from a science data set, subtracting the
     superbias from each group of each integration in the science data.
 
@@ -56,7 +56,7 @@
 
     Parameters
     ----------
-    input: data model object
+    output: data model object
         the input science data
 
     bias: superbias model object
@@ -69,19 +69,18 @@
 
     """
 
-    # Create output as a copy of the input science data model
-    output = input.copy()
-
     # combine the science and superbias DQ arrays
-    output.pixeldq = np.bitwise_or(input.pixeldq, bias.dq)
+    output.pixeldq = np.bitwise_or(output.pixeldq, bias.dq)
 
     # Subtract the superbias image from all groups and integrations
    # of the science data
+    if not np.issubdtype(output.data.dtype, np.floating):
+        output.data = output.data.astype(float)
     output.data -= bias.data
 
     # If ZEROFRAME is present, subtract the super bias. Zero values
     # indicate bad data, so should be kept zero.
-    if input.meta.exposure.zero_frame:
+    if output.meta.exposure.zero_frame:
         wh_zero = np.where(output.zeroframe == 0.)
         output.zeroframe -= bias.data
         output.zeroframe[wh_zero] = 0.
# Zero values indicate unusable data diff --git a/jwst/superbias/superbias_step.py b/jwst/superbias/superbias_step.py index 26ddfc0758..8da93d5eed 100755 --- a/jwst/superbias/superbias_step.py +++ b/jwst/superbias/superbias_step.py @@ -15,15 +15,14 @@ class SuperBiasStep(Step): class_alias = "superbias" spec = """ - """ reference_file_types = ['superbias'] - def process(self, input): + def process(self, step_input): # Open the input data model - with datamodels.RampModel(input) as input_model: + with datamodels.open(step_input) as input_model: # Get the name of the superbias reference file to use self.bias_name = self.get_reference_file(input_model, 'superbias') @@ -33,19 +32,20 @@ def process(self, input): if self.bias_name == 'N/A': self.log.warning('No SUPERBIAS reference file found') self.log.warning('Superbias step will be skipped') - result = input_model.copy() - result.meta.cal_step.superbias = 'SKIPPED' - return result + input_model.meta.cal_step.superbias = 'SKIPPED' + return input_model # Open the superbias ref file data model bias_model = datamodels.SuperBiasModel(self.bias_name) - # Do the bias subtraction - result = bias_sub.do_correction(input_model, bias_model) + # Work on a copy + result = input_model.copy() - # Close the superbias reference file model and - # set the step status to complete - bias_model.close() + # Do the bias subtraction + result = bias_sub.do_correction(result, bias_model) result.meta.cal_step.superbias = 'COMPLETE' + # Cleanup + del bias_model + return result diff --git a/jwst/superbias/tests/test_bias_sub.py b/jwst/superbias/tests/test_bias_sub.py index b517b988df..266450df75 100755 --- a/jwst/superbias/tests/test_bias_sub.py +++ b/jwst/superbias/tests/test_bias_sub.py @@ -27,8 +27,8 @@ def test_basic_superbias_subtraction(setup_full_cube): data.data[:] = blevel bias.data[:] = blevel - # Run the pipeline - output = do_correction(data, bias) + # Run the pipeline on a copy as this is now being done in superbias_step + output = do_correction(data.copy(), bias) # Make sure that manual superbias subtraction matches pipeline output manual = data.data - bias.data @@ -54,8 +54,8 @@ def test_subarray_correction(setup_subarray_cube): data.data[:] = blevel manualbias[:] = blevel - # Run the pipeline - output = do_correction(data, bias) + # Run the pipeline on a copy as this is now being done in superbias_step + output = do_correction(data.copy(), bias) # Make sure the subarray was extracted correctly from superbias file # Make sure that manual superbias subtraction matches pipeline output @@ -106,8 +106,8 @@ def test_nans_in_superbias(setup_full_cube): # Set superbias value for pixel to NaN bias.data[500, 500] = np.nan - # Run the pipeline - output = do_correction(data, bias) + # Run the pipeline on a copy as this is now being done in superbias_step + output = do_correction(data.copy(), bias) # Check that subtraction was not done on pixel with NaN reference value # Check that subtraction was done on other pixels From 40a019e9193af0a48c5026bdacfcc1a6ae0aab11 Mon Sep 17 00:00:00 2001 From: Tyler Pauly Date: Fri, 20 Sep 2024 14:09:45 -0400 Subject: [PATCH 39/39] JP-3588: Use Pastasoss datamodel for NIRISS SOSS transform solution (#8763) --- CHANGES.rst | 6 + docs/jwst/extract_1d/arguments.rst | 5 - jwst/extract_1d/extract_1d_step.py | 10 +- jwst/extract_1d/soss_extract/atoca_utils.py | 74 --- jwst/extract_1d/soss_extract/pastasoss.py | 551 ++++++++++++++++++ .../soss_extract/soss_boxextract.py | 2 +- jwst/extract_1d/soss_extract/soss_extract.py | 376 +++++------- 
jwst/extract_1d/soss_extract/soss_solver.py | 508 ---------------- 8 files changed, 693 insertions(+), 839 deletions(-) create mode 100644 jwst/extract_1d/soss_extract/pastasoss.py delete mode 100644 jwst/extract_1d/soss_extract/soss_solver.py diff --git a/CHANGES.rst b/CHANGES.rst index 1a61f13086..3948e03994 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -122,6 +122,12 @@ emicorr - Removed unnecessary copies, and created a single copy at step.py level. [#8676] +extract_1d +---------- + +- Updated NIRISS SOSS extraction to utilize ``pastasoss`` + rotation solution. [#8763] + first_frame ----------- diff --git a/docs/jwst/extract_1d/arguments.rst b/docs/jwst/extract_1d/arguments.rst index dd049b4902..7b338a86f4 100644 --- a/docs/jwst/extract_1d/arguments.rst +++ b/docs/jwst/extract_1d/arguments.rst @@ -169,11 +169,6 @@ The ``extract_1d`` step has the following step-specific arguments. used when soss_wave_grid is not provided to make sure the computation time or the memory used stays reasonable. Default value is 20000. -``--soss_transform`` - This is a NIRISS-SOSS algorithm-specific parameter; this defines a rotation to - apply to the reference files to match the observation. It should be specified as - a list of three floats, with default values of None. - ``--soss_tikfac`` This is a NIRISS-SOSS algorithm-specific parameter; this is the regularization factor used in the SOSS extraction. If not specified, ATOCA will calculate a diff --git a/jwst/extract_1d/extract_1d_step.py b/jwst/extract_1d/extract_1d_step.py index 97399f2c72..f8b43b0178 100644 --- a/jwst/extract_1d/extract_1d_step.py +++ b/jwst/extract_1d/extract_1d_step.py @@ -174,14 +174,13 @@ class Extract1dStep(Step): soss_estimate = input_file(default = None) # Estimate used to generate the wavelength grid soss_rtol = float(default=1.0e-4) # Relative tolerance needed on a pixel model soss_max_grid_size = integer(default=20000) # Maximum grid size, if wave_grid not specified - soss_transform = list(default=None, min=3, max=3) # rotation applied to the ref files to match observation. soss_tikfac = float(default=None) # regularization factor for NIRISS SOSS extraction soss_width = float(default=40.) # aperture width used to extract the 1D spectrum from the de-contaminated trace. soss_bad_pix = option("model", "masking", default="masking") # method used to handle bad pixels soss_modelname = output_file(default = None) # Filename for optional model output of traces and pixel weights """ - reference_file_types = ['extract1d', 'apcorr', 'wavemap', 'spectrace', 'specprofile', 'speckernel'] + reference_file_types = ['extract1d', 'apcorr', 'pastasoss', 'specprofile', 'speckernel'] def process(self, input): """Execute the step. @@ -432,8 +431,7 @@ def process(self, input): return input_model # Load reference files. 
- spectrace_ref_name = self.get_reference_file(input_model, 'spectrace') - wavemap_ref_name = self.get_reference_file(input_model, 'wavemap') + pastasoss_ref_name = self.get_reference_file(input_model, 'pastasoss') specprofile_ref_name = self.get_reference_file(input_model, 'specprofile') speckernel_ref_name = self.get_reference_file(input_model, 'speckernel') @@ -444,7 +442,6 @@ def process(self, input): soss_kwargs['tikfac'] = self.soss_tikfac soss_kwargs['width'] = self.soss_width soss_kwargs['bad_pix'] = self.soss_bad_pix - soss_kwargs['transform'] = self.soss_transform soss_kwargs['subtract_background'] = self.subtract_background soss_kwargs['rtol'] = self.soss_rtol soss_kwargs['max_grid_size'] = self.soss_max_grid_size @@ -458,8 +455,7 @@ def process(self, input): # Run the extraction. result, ref_outputs, atoca_outputs = soss_extract.run_extract1d( input_model, - spectrace_ref_name, - wavemap_ref_name, + pastasoss_ref_name, specprofile_ref_name, speckernel_ref_name, subarray, diff --git a/jwst/extract_1d/soss_extract/atoca_utils.py b/jwst/extract_1d/soss_extract/atoca_utils.py index 60b26621d1..19a0144539 100644 --- a/jwst/extract_1d/soss_extract/atoca_utils.py +++ b/jwst/extract_1d/soss_extract/atoca_utils.py @@ -252,80 +252,6 @@ def get_wv_map_bounds(wave_map, dispersion_axis=1): return wave_top, wave_bottom -def check_dispersion_direction(wave_map, dispersion_axis=1, dwv_sign=-1): - """Check that the dispersion axis is increasing in the good direction - given by `dwv_sign`` - Parameters - ---------- - wave_map : array[float] - 2d-map of the pixel central wavelength - dispersion_axis : int, optional - Which axis is the dispersion axis (0 or 1) - dwv_sign : int, optional - Direction of increasing wavelengths (-1 or 1) - - Returns - ------- - bool_map : array[bool] - Boolean 2d map of the valid dispersion direction, same shape as `wave_map` - """ - - # Estimate the direction of increasing wavelength - wave_left, wave_right = get_wv_map_bounds(wave_map, dispersion_axis=dispersion_axis) - dwv = wave_right - wave_left - - # Return bool map of pixels following the good direction - bool_map = (dwv_sign * dwv >= 0) - # The bad value could be from left or right so mask both - bool_map &= np.roll(bool_map, 1, axis=dispersion_axis) - - return bool_map - - -def mask_bad_dispersion_direction(wave_map, n_max=10, fill_value=0, dispersion_axis=1, dwv_sign=-1): - """Change value of the pixels in `wave_map` that do not follow the - general dispersion direction. - - Parameters - ---------- - wave_map : array[float] - 2d-map of the pixel central wavelength - n_max : int - Maximum number of iterations - fill_value : float - Value use to replace pixels that do not follow the dispersion direction - dispersion_axis : int, optional - Which axis is the dispersion axis (0 or 1) - dwv_sign : int, optional - Direction of increasing wavelengths (-1 or 1) - - Returns - ------- - wave_map : array[float] - The corrected wave_map. - convergence flag : bool - Boolean set to True if all the pixels are now valid, False otherwise. - """ - # Do not modify the input - wave_map = wave_map.copy() - - # Make the correction iteratively - for i_try in range(n_max): - # Check which pixels are good - is_good_direction = check_dispersion_direction(wave_map, dispersion_axis, dwv_sign) - # Stop iteration if all good, or apply correction where needed. - if is_good_direction.all(): - convergence_flag = True - break - else: - wave_map[~is_good_direction] = fill_value - else: - # Did not succeed! 
:( - convergence_flag = False - - return wave_map, convergence_flag - - def oversample_grid(wave_grid, n_os=1): """Create an oversampled version of the input 1D wavelength grid. diff --git a/jwst/extract_1d/soss_extract/pastasoss.py b/jwst/extract_1d/soss_extract/pastasoss.py new file mode 100644 index 0000000000..4030df2224 --- /dev/null +++ b/jwst/extract_1d/soss_extract/pastasoss.py @@ -0,0 +1,551 @@ +from functools import partial +import logging +import numpy as np +from scipy.interpolate import interp1d + +log = logging.getLogger(__name__) + +SOSS_XDIM = 2048 +SOSS_YDIM = 300 +XTRACE_ORD1_LEN = SOSS_XDIM +XTRACE_ORD2_LEN = 1783 +WAVEMAP_WLMIN = 0.5 +WAVEMAP_WLMAX = 5.5 +WAVEMAP_NWL = 5001 +SUBARRAY_YMIN = 2048 - 256 + +def get_wavelengths(refmodel, x, pwcpos, order): + """Get the associated wavelength values for a given spectral order""" + if order == 1: + wavelengths = wavecal_model_order1_poly(refmodel, x, pwcpos) + elif order == 2: + wavelengths = wavecal_model_order2_poly(refmodel, x, pwcpos) + + return wavelengths + + +def min_max_scaler(x, x_min, x_max): + """ + Apply min-max scaling to input values. + + Parameters + ---------- + x : float or numpy.ndarray + The input value(s) to be scaled. + x_min : float + The minimum value in the range to which 'x' will be scaled. + x_max : float + The maximum value in the range to which 'x' will be scaled. + + Returns + ------- + float or numpy.ndarray + The scaled value(s) in the range [0, 1]. + + Notes + ----- + Min-max scaling is a data normalization technique that scales input values + 'x' to the range [0, 1] based on the provided minimum and maximum values, + 'x_min' and 'x_max'. This function is applicable to both individual values + and arrays of values. This function will use the min/max values from the + training data of the wavecal model. + """ + x_scaled = (x - x_min) / (x_max - x_min) + return x_scaled + + +def wavecal_model_order1_poly(refmodel, x, pwcpos): + """Compute order 1 wavelengths. 
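+
+    Wavelengths are modeled as a degree-5 polynomial in the min-max scaled
+    pixel column and the scaled pupil wheel offset (pwcpos minus the
+    commanded position), using the coefficients stored in the reference
+    model.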
+ + Parameters + ---------- + refmodel : PastasossModel + The reference model holding the wavecal models + and scale extents + x : float or numpy.ndarray + The input pixel values for which the function + will estimate wavelengths + pwcpos : float + The position of the pupil wheel; used to determine + the difference between current and commanded position + to rotate the model + """ + x_scaler = partial( + min_max_scaler, + **{ + "x_min": refmodel.wavecal_models[0].scale_extents[0][0], + "x_max": refmodel.wavecal_models[0].scale_extents[1][0], + }, + ) + + pwcpos_offset_scaler = partial( + min_max_scaler, + **{ + "x_min": refmodel.wavecal_models[0].scale_extents[0][1], + "x_max": refmodel.wavecal_models[0].scale_extents[1][1], + }, + ) + + def get_poly_features(x, offset): + """Polynomial features for the order 1 wavecal model""" + poly_features = np.array( + [ + x, + offset, + x**2, + x * offset, + offset**2, + x**3, + x**2 * offset, + x * offset**2, + offset**3, + x**4, + x**3 * offset, + x**2 * offset**2, + x * offset**3, + offset**4, + x**5, + x**4 * offset, + x**3 * offset**2, + x**2 * offset**3, + x * offset**4, + offset**5, + ] + ) + return poly_features + + # extract model weights and intercept + coef = refmodel.wavecal_models[0].coefficients + + # get pixel columns and then scaled + x_scaled = x_scaler(x) + + # offset + offset = np.ones_like(x) * (pwcpos - refmodel.meta.pwcpos_cmd) + offset_scaled = pwcpos_offset_scaler(offset) + + # polynomial features + poly_features = get_poly_features(x_scaled, offset_scaled) + wavelengths = coef[0] + coef[1:] @ poly_features + + return wavelengths + + +def wavecal_model_order2_poly(refmodel, x, pwcpos): + """Compute order 2 wavelengths. + + Parameters + ---------- + refmodel : PastasossModel + The reference model holding the wavecal models + and scale extents + x : float or numpy.ndarray + The input pixel values for which the function + will estimate wavelengths + pwcpos : float + The position of the pupil wheel; used to determine + the difference between current and commanded position + to rotate the model + """ + x_scaler = partial( + min_max_scaler, + **{ + "x_min": refmodel.wavecal_models[1].scale_extents[0][0], + "x_max": refmodel.wavecal_models[1].scale_extents[1][0], + }, + ) + + pwcpos_offset_scaler = partial( + min_max_scaler, + **{ + "x_min": refmodel.wavecal_models[1].scale_extents[0][1], + "x_max": refmodel.wavecal_models[1].scale_extents[1][1], + }, + ) + + def get_poly_features(x, offset): + """Polynomial features for the order 2 wavecal model""" + poly_features = np.array( + [ + x, + offset, + x**2, + x * offset, + offset**2, + x**3, + x**2 * offset, + x * offset**2, + offset**3, + ] + ) + return poly_features + + # coef and intercept + coef = refmodel.wavecal_models[1].coefficients + + # get pixel columns and then scaled + x_scaled = x_scaler(x) + + offset = np.ones_like(x) * pwcpos + offset_scaled = pwcpos_offset_scaler(offset) + + # polynomial features + poly_features = get_poly_features(x_scaled, offset_scaled) + wavelengths = coef[0] + coef[1:] @ poly_features + + return wavelengths + + +def rotate(x, y, angle, origin=(0, 0), interp=True): + """ + Applies a rotation transformation to a set of 2D points. + + Parameters + ---------- + x : np.ndarray + The x-coordinates of the points to be transformed. + y : np.ndarray + The y-coordinates of the points to be transformed. + angle : float + The angle (in degrees) by which to rotate the points. + origin : Tuple[float, float], optional + The point about which to rotate the points. 
Default is (0, 0).
+    interp : bool, optional
+        Whether to interpolate the rotated positions onto the original x-pixel
+        column values. Default is True.
+
+    Returns
+    -------
+    Tuple[np.ndarray, np.ndarray]
+        The x and y coordinates of the rotated points.
+
+    Examples
+    --------
+    >>> x = np.array([0, 1, 2, 3])
+    >>> y = np.array([0, 1, 2, 3])
+    >>> x_rot, y_rot = rotate(x, y, 90)
+    """
+
+    # shift to rotate about center
+    xy_center = np.atleast_2d(origin).T
+    xy = np.vstack([x, y])
+
+    # Rotation transform matrix
+    radians = np.radians(angle)
+    c, s = np.cos(radians), np.sin(radians)
+    R = np.array([[c, -s], [s, c]])
+
+    # apply transformation
+    x_new, y_new = R @ (xy - xy_center) + xy_center
+
+    # interpolate rotated positions onto x-pixel column values (default)
+    if interp:
+        # interpolate new coordinates onto original x values and mask values
+        # outside of the domain of the image 0<=x<=2047 and 0<=y<=255.
+        y_new = interp1d(x_new, y_new, fill_value="extrapolate")(x)
+        mask = np.where(y_new <= 255.0)
+        x = x[mask]
+        y_new = y_new[mask]
+        return x, y_new
+
+    return x_new, y_new
+
+
+def find_spectral_order_index(refmodel, order):
+    """Return the index of the trace and wavecal model corresponding to order
+
+    Parameters
+    ----------
+    refmodel : datamodel
+        The reference file holding traces and wavelength calibration
+        models, under `refmodel.traces` and `refmodel.wavecal_models`
+    order : int
+        The spectral order to find trace and wavecal model indices for.
+
+    Returns
+    -------
+    int
+        The index into the reference file lists of traces and wavecal
+        models that retrieves the arrays for the desired spectral order
+    """
+
+    for i, entry in enumerate(refmodel.traces):
+        if entry.spectral_order == order:
+            return i
+
+    log.warning("Order not found in reference file trace list.")
+    return -1
+
+
+def get_soss_traces(refmodel, pwcpos, order, subarray, interp=True):
+    """Generate the traces given a pupil wheel position.
+
+    This is the primary method for generating the gr700xd trace position given a
+    pupil wheel position angle provided in the FITS header under keyword
+    PWCPOS. The traces for a given spectral order are found by performing a
+    rotation transformation using the reference trace positions at the commanded
+    PWCPOS=245.76 degrees. This method yields sub-pixel performance and will be
+    improved upon in later iterations as more NIRISS/SOSS observations become
+    available.
+
+    Parameters
+    ----------
+    refmodel : PastasossModel
+        The reference file datamodel.
+    pwcpos : float
+        The pupil wheel position angle provided in the FITS header under
+        keyword PWCPOS.
+    order : str
+        The spectral order for which a trace is computed.
+        Order 3 is currently unsupported.
+    subarray : str
+        Name of subarray in use, typically 'SUBSTRIP96' or 'SUBSTRIP256'.
+    interp : bool, optional
+        Whether to interpolate the rotated positions onto the original x-pixel
+        column values. Default is True.
+
+    Returns
+    -------
+    Tuple[str, np.ndarray, np.ndarray, np.ndarray]
+        The spectral order, the x and y coordinates of the rotated trace,
+        and the wavelength associated with each trace position.
+
+    Raises
+    ------
+    ValueError
+        If `order` is not a spectral order present in the reference model
+        (currently '1' or '2').
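+
+    Notes
+    -----
+    The rotation is applied about the order-specific pivot point stored in
+    the reference model, through an angle of pwcpos minus the commanded
+    pupil wheel position (refmodel.meta.pwcpos_cmd).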
+    """
+    spectral_order_index = find_spectral_order_index(refmodel, int(order))
+
+    if spectral_order_index < 0:
+        error_message = f"Order {order} is not supported at this time."
+        log.error(error_message)
+        raise ValueError(error_message)
+    else:
+        # reference trace data
+        x, y = refmodel.traces[spectral_order_index].trace.T.copy()
+        origin = refmodel.traces[spectral_order_index].pivot_x, refmodel.traces[spectral_order_index].pivot_y
+
+        # Offset for SUBSTRIP96
+        if subarray == 'SUBSTRIP96':
+            y -= 10
+        # rotated reference trace
+        x_new, y_new = rotate(x, y, pwcpos - refmodel.meta.pwcpos_cmd, origin, interp=interp)
+
+        # wavelengths associated with the trace at the given pwcpos value
+        wavelengths = get_wavelengths(refmodel, x_new, pwcpos, int(order))
+
+        return order, x_new, y_new, wavelengths
+
+
+def extrapolate_to_wavegrid(w_grid, wavelength, quantity):
+    """
+    Linearly extrapolate a quantity beyond its native wavelength range
+    and resample it onto a given wavelength grid
+
+    Parameters
+    ----------
+    w_grid : sequence
+        The wavelength grid to interpolate onto
+    wavelength : sequence
+        The native wavelength values of the data
+    quantity : sequence
+        The data to interpolate
+
+    Returns
+    -------
+    Array
+        The interpolated quantities
+    """
+    sorted = np.argsort(wavelength)
+    q = quantity[sorted]
+    w = wavelength[sorted]
+
+    # Determine the slope on the right of the array
+    slope_right = (q[-1] - q[-2]) / (w[-1] - w[-2])
+    # extrapolate at wavelengths larger than the max on the right
+    indright = np.where(w_grid > w[-1])[0]
+    q_right = q[-1] + (w_grid[indright] - w[-1]) * slope_right
+    # Determine the slope on the left of the array
+    slope_left = (q[1] - q[0]) / (w[1] - w[0])
+    # extrapolate at wavelengths smaller than the min on the left
+    indleft = np.where(w_grid < w[0])[0]
+    q_left = q[0] + (w_grid[indleft] - w[0]) * slope_left
+    # Construct an extrapolated array of the quantity
+    w = np.concatenate((w_grid[indleft], w, w_grid[indright]))
+    q = np.concatenate((q_left, q, q_right))
+
+    # resample at the w_grid everywhere
+    q_grid = np.interp(w_grid, w, q)
+
+    return q_grid
+
+
+def calc_2d_wave_map(wave_grid, x_dms, y_dms, tilt, oversample=2, padding=0, maxiter=5, dtol=1e-2):
+    """Compute the 2D wavelength map on the detector.
+
+    Parameters
+    ----------
+    wave_grid : sequence
+        The wavelength corresponding to the x_dms, y_dms, and tilt values.
+    x_dms : sequence
+        The trace x position on the detector in DMS coordinates.
+    y_dms : sequence
+        The trace y position on the detector in DMS coordinates.
+    tilt : sequence
+        The trace tilt angle in degrees.
+    oversample : int
+        The oversampling factor of the input coordinates.
+    padding : int
+        The native pixel padding around the edge of the detector.
+    maxiter : int
+        The maximum number of iterations used when solving for the wavelength at each pixel.
+    dtol : float
+        The tolerance of the iterative solution in pixels.
+
+    Returns
+    -------
+    Array
+        An array containing the wavelength at each pixel on the detector.
+    """
+    os = np.copy(oversample)
+    xpad = np.copy(padding)
+    ypad = np.copy(padding)
+
+    # No need to compute wavelengths across the entire detector, slightly larger than SUBSTRIP256 will do.
+    dimx, dimy = SOSS_XDIM, SOSS_YDIM
+    y_dms = y_dms + (dimy - SOSS_XDIM)  # Adjust y-coordinate to area of interest.
+
+    # Generate the oversampled grid of pixel coordinates.
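+    # With oversampling factor os and padding, subpixel index i maps to the
+    # native coordinate i / os - (os - 1) / (2 * os) - pad, so each group of
+    # os subpixels is centered on an integer native-pixel coordinate.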
+    x_vec = np.arange((dimx + 2 * xpad) * os) / os - (os - 1) / (2 * os) - xpad
+    y_vec = np.arange((dimy + 2 * ypad) * os) / os - (os - 1) / (2 * os) - ypad
+    x_grid, y_grid = np.meshgrid(x_vec, y_vec)
+
+    # Iteratively compute the wavelength at each pixel.
+    delta_x = 0.0  # A shift in x represents a shift in wavelength.
+    for niter in range(maxiter):
+
+        # Assume all y have same wavelength.
+        wave_iterated = np.interp(x_grid - delta_x, x_dms[::-1],
+                                  wave_grid[::-1])  # Invert arrays to get increasing x.
+
+        # Compute the tilt angle at the wavelengths.
+        tilt_tmp = np.interp(wave_iterated, wave_grid, tilt)
+
+        # Compute the trace position at the wavelengths.
+        x_estimate = np.interp(wave_iterated, wave_grid, x_dms)
+        y_estimate = np.interp(wave_iterated, wave_grid, y_dms)
+
+        # Project that back to pixel coordinates.
+        x_iterated = x_estimate + (y_grid - y_estimate) * np.tan(np.deg2rad(tilt_tmp))
+
+        # Measure error between requested and iterated position.
+        delta_x = delta_x + (x_iterated - x_grid)
+
+        # If the desired precision has been reached end iterations.
+        if np.all(np.abs(x_iterated - x_grid) < dtol):
+            break
+
+    # Evaluate the final wavelength map, this time setting out-of-bounds values to NaN.
+    wave_map_2d = np.interp(x_grid - delta_x, x_dms[::-1], wave_grid[::-1], left=np.nan, right=np.nan)
+
+    # Extend to full detector size.
+    tmp = np.full((os * (dimx + 2 * xpad), os * (dimx + 2 * xpad)), fill_value=np.nan)
+    tmp[-os * (dimy + 2 * ypad):] = wave_map_2d
+    wave_map_2d = tmp
+
+    return wave_map_2d
+
+
+def get_soss_wavemaps(refmodel, pwcpos, subarray, padding=False, padsize=0, spectraces=False):
+    """
+    Generate order 1 and 2 2D wavemaps from the rotated SOSS trace positions
+
+    Parameters
+    ----------
+    refmodel : PastasossModel
+        The reference file datamodel
+    pwcpos : float
+        The pupil wheel position
+    subarray : str
+        The subarray name, ['FULL', 'SUBSTRIP256', 'SUBSTRIP96']
+    padding : bool
+        Include padding on map edges (only needed for reference files)
+    padsize : int
+        The size of the padding to include on each side
+    spectraces : bool
+        Return the interpolated spectraces as well
+
+    Returns
+    -------
+    Array, Array
+        The 2D wavemaps and, if `spectraces` is True, the corresponding 1D spectraces
+    """
+    _, order1_x, order1_y, order1_wl = get_soss_traces(refmodel, pwcpos, order='1', subarray=subarray, interp=True)
+    _, order2_x, order2_y, order2_wl = get_soss_traces(refmodel, pwcpos, order='2', subarray=subarray, interp=True)
+
+    # Make wavemap from trace center wavelengths, padding to shape (296, 2088)
+    wavemin = WAVEMAP_WLMIN
+    wavemax = WAVEMAP_WLMAX
+    nwave = WAVEMAP_NWL
+    wave_grid = np.linspace(wavemin, wavemax, nwave)
+
+    # Extrapolate wavelengths for order 1 trace
+    xtrace_order1 = extrapolate_to_wavegrid(wave_grid, order1_wl, order1_x)
+    ytrace_order1 = extrapolate_to_wavegrid(wave_grid, order1_wl, order1_y)
+    spectrace_1 = np.array([xtrace_order1, ytrace_order1, wave_grid])
+
+    # Set cutoff for order 2 where it runs off the detector
+    o2_cutoff = XTRACE_ORD2_LEN
+    w_o2_tmp = order2_wl[:o2_cutoff]
+    # Subtract 8 from FULL width to avoid reference pixels
+    w_o2 = np.zeros(SOSS_XDIM - 8) * np.nan
+    w_o2[:o2_cutoff] = w_o2_tmp
+    y_o2_tmp = order2_y[:o2_cutoff]
+    y_o2 = np.zeros(SOSS_XDIM - 8) * np.nan
+    y_o2[:o2_cutoff] = y_o2_tmp
+    x_o2 = np.copy(order1_x)
+
+    # Fill columns beyond the order 2 cutoff with linear extrapolation
+    m = w_o2[o2_cutoff - 1] - w_o2[o2_cutoff - 2]
+    dx = np.arange(SOSS_XDIM - 8 - o2_cutoff) + 1
+    w_o2[o2_cutoff:] = w_o2[o2_cutoff - 1] + m * dx
+    m = y_o2[o2_cutoff - 1] - y_o2[o2_cutoff - 2]
+    dx = np.arange(SOSS_XDIM - 8 - o2_cutoff) + 1
+    y_o2[o2_cutoff:] = y_o2[o2_cutoff - 1] + m * dx
+
+    # Extrapolate wavelengths for order 2 trace
+    xtrace_order2 = extrapolate_to_wavegrid(wave_grid, w_o2, x_o2)
+    ytrace_order2 = extrapolate_to_wavegrid(wave_grid, w_o2, y_o2)
+    spectrace_2 = np.array([xtrace_order2, ytrace_order2, wave_grid])
+
+    # Make wavemap from wavelength solution for order 1
+    wavemap_1 = calc_2d_wave_map(wave_grid, xtrace_order1, ytrace_order1, np.zeros_like(xtrace_order1), oversample=1, padding=padsize)
+
+    # Make wavemap from wavelength solution for order 2
+    wavemap_2 = calc_2d_wave_map(wave_grid, xtrace_order2, ytrace_order2, np.zeros_like(xtrace_order2), oversample=1, padding=padsize)
+
+    # Extrapolate wavemap to FULL frame
+    wavemap_1[:SUBARRAY_YMIN - padsize, :] = wavemap_1[SUBARRAY_YMIN - padsize]
+    wavemap_2[:SUBARRAY_YMIN - padsize, :] = wavemap_2[SUBARRAY_YMIN - padsize]
+
+    # Trim to subarray
+    if subarray == 'SUBSTRIP256':
+        wavemap_1 = wavemap_1[SUBARRAY_YMIN - padsize:SOSS_XDIM + padsize, :]
+        wavemap_2 = wavemap_2[SUBARRAY_YMIN - padsize:SOSS_XDIM + padsize, :]
+    if subarray == 'SUBSTRIP96':
+        wavemap_1 = wavemap_1[SUBARRAY_YMIN - padsize:SUBARRAY_YMIN + 96 + padsize, :]
+        wavemap_2 = wavemap_2[SUBARRAY_YMIN - padsize:SUBARRAY_YMIN + 96 + padsize, :]
+
+    # Remove padding if necessary
+    if not padding and padsize != 0:
+        wavemap_1 = wavemap_1[padsize:-padsize, padsize:-padsize]
+        wavemap_2 = wavemap_2[padsize:-padsize, padsize:-padsize]
+
+    if spectraces:
+        return np.array([wavemap_1, wavemap_2]), np.array([spectrace_1, spectrace_2])
+
+    else:
+        return np.array([wavemap_1, wavemap_2])
diff --git a/jwst/extract_1d/soss_extract/soss_boxextract.py b/jwst/extract_1d/soss_extract/soss_boxextract.py
index fc560bbe2b..f499b21d7b 100644
--- a/jwst/extract_1d/soss_extract/soss_boxextract.py
+++ b/jwst/extract_1d/soss_extract/soss_boxextract.py
@@ -37,7 +37,7 @@ def get_box_weights(centroid, n_pix, shape, cols=None):
 
     # Row centers of all pixels.
     rows = np.indices((nrows, len(cols)))[0]
 
-    # Pixels that are entierly inside the box are set to one.
+    # Pixels that are entirely inside the box are set to one.
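+    # (Both conditions below hold only when the full pixel extent
+    # [row - 0.5, row + 0.5] lies within centroid +/- n_pix / 2.)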
cond = (rows <= (centroid - 0.5 + n_pix / 2)) cond &= ((centroid + 0.5 - n_pix / 2) <= rows) weights = cond.astype(float) diff --git a/jwst/extract_1d/soss_extract/soss_extract.py b/jwst/extract_1d/soss_extract/soss_extract.py index 1a23994197..77a0284463 100644 --- a/jwst/extract_1d/soss_extract/soss_extract.py +++ b/jwst/extract_1d/soss_extract/soss_extract.py @@ -1,7 +1,7 @@ import logging import numpy as np -from scipy.interpolate import UnivariateSpline +from scipy.interpolate import UnivariateSpline, CubicSpline from stdatamodels.jwst import datamodels from stdatamodels.jwst.datamodels import dqflags, SossWaveGridModel @@ -11,25 +11,24 @@ from astropy.nddata.bitmask import bitfield_to_boolean_mask from .soss_syscor import make_background_mask, soss_background -from .soss_solver import solve_transform, transform_wavemap, transform_profile, transform_coords from .atoca import ExtractionEngine, MaskOverlapError -from .atoca_utils import (ThroughputSOSS, WebbKernel, grid_from_map, mask_bad_dispersion_direction, +from .atoca_utils import (ThroughputSOSS, WebbKernel, grid_from_map, make_combined_adaptive_grid, get_wave_p_or_m, oversample_grid) from .soss_boxextract import get_box_weights, box_extract, estim_error_nearest_data +from .pastasoss import get_soss_wavemaps, XTRACE_ORD1_LEN, XTRACE_ORD2_LEN + log = logging.getLogger(__name__) log.setLevel(logging.DEBUG) -def get_ref_file_args(ref_files, transform): +def get_ref_file_args(ref_files): """Prepare the reference files for the extraction engine. Parameters ---------- ref_files : dict - A dictionary of the reference file DataModels. - transform : array or list - A 3-element array describing the rotation and translation to apply - to the reference files in order to match the observation. + A dictionary of the reference file DataModels, along with values + for the subarray and pwcpos, i.e. the pupil wheel position. Returns ------- @@ -38,39 +37,53 @@ def get_ref_file_args(ref_files, transform): (wavemaps, specprofiles, throughputs, kernels) """ - # The wavelength maps for order 1 and 2. - wavemap_ref = ref_files['wavemap'] - - ovs = wavemap_ref.map[0].oversampling - pad = wavemap_ref.map[0].padding - - wavemap_o1 = transform_wavemap(transform, wavemap_ref.map[0].data, ovs, pad) - wavemap_o2 = transform_wavemap(transform, wavemap_ref.map[1].data, ovs, pad) - - # Make sure all pixels follow the expected direction of the dispersion - wavemap_o1, flag_o1 = mask_bad_dispersion_direction(wavemap_o1) - wavemap_o2, flag_o2 = mask_bad_dispersion_direction(wavemap_o2) + pastasoss_ref = ref_files['pastasoss'] + pad = getattr(pastasoss_ref.traces[0], "padding", 0) + if pad > 0: + do_padding = True + else: + do_padding = False - # Warn if not all pixels were corrected - msg_warning = 'Some pixels in order {} do not follow the expected dispersion axis' - if not flag_o1: - log.warning(msg_warning.format(1)) - if not flag_o2: - log.warning(msg_warning.format(2)) + (wavemap_o1, wavemap_o2), (spectrace_o1, spectrace_o2) = \ + get_soss_wavemaps(pastasoss_ref, pwcpos=ref_files['pwcpos'], subarray=ref_files['subarray'], + padding=do_padding, padsize=pad, spectraces=True) # The spectral profiles for order 1 and 2. 
specprofile_ref = ref_files['specprofile'] - ovs = specprofile_ref.profile[0].oversampling - pad = specprofile_ref.profile[0].padding - specprofile_o1 = transform_profile(transform, specprofile_ref.profile[0].data, ovs, pad, norm=False) - specprofile_o2 = transform_profile(transform, specprofile_ref.profile[1].data, ovs, pad, norm=False) + specprofile_o1 = specprofile_ref.profile[0].data + specprofile_o2 = specprofile_ref.profile[1].data + + prof_shape0, prof_shape1 = specprofile_o1.shape + wavemap_shape0, wavemap_shape1 = wavemap_o1.shape + + if prof_shape0 != wavemap_shape0: + pad0 = (prof_shape0 - wavemap_shape0) // 2 + if pad0 > 0: + specprofile_o1 = specprofile_o1[pad0: -pad0, :] + specprofile_o2 = specprofile_o2[pad0: -pad0, :] + elif pad0 < 0: + wavemap_o1 = wavemap_o1[pad0: -pad0, :] + wavemap_o2 = wavemap_o2[pad0: -pad0, :] + if prof_shape1 != wavemap_shape1: + pad1 = (prof_shape1 - wavemap_shape1) // 2 + if pad1 > 0: + specprofile_o1 = specprofile_o1[:, pad1: -pad1] + specprofile_o2 = specprofile_o2[:, pad1: -pad1] + elif pad1 < 0: + wavemap_o1 = wavemap_o1[:, pad1: -pad1] + wavemap_o2 = wavemap_o2[:, pad1: -pad1] + # The throughput curves for order 1 and 2. - spectrace_ref = ref_files['spectrace'] + throughput_index_dict = dict() + for i, throughput in enumerate(pastasoss_ref.throughputs): + throughput_index_dict[throughput.spectral_order] = i - throughput_o1 = ThroughputSOSS(spectrace_ref.trace[0].data['WAVELENGTH'], spectrace_ref.trace[0].data['THROUGHPUT']) - throughput_o2 = ThroughputSOSS(spectrace_ref.trace[1].data['WAVELENGTH'], spectrace_ref.trace[1].data['THROUGHPUT']) + throughput_o1 = ThroughputSOSS(pastasoss_ref.throughputs[throughput_index_dict[1]].wavelength[:], + pastasoss_ref.throughputs[throughput_index_dict[1]].throughput[:]) + throughput_o2 = ThroughputSOSS(pastasoss_ref.throughputs[throughput_index_dict[2]].wavelength[:], + pastasoss_ref.throughputs[throughput_index_dict[2]].throughput[:]) # The spectral kernels. speckernel_ref = ref_files['speckernel'] @@ -85,13 +98,12 @@ def get_ref_file_args(ref_files, transform): # Needs the same number of columns as the detector. Put zeros where not define. wv_cent = np.zeros((1, wv_map.shape[1])) # Get central wavelength as a function of columns - col, _, wv = get_trace_1d(ref_files, transform, order) + col, _, wv = get_trace_1d(ref_files, order) wv_cent[:, col] = wv # Set invalid values to zero idx_invalid = ~np.isfinite(wv_cent) wv_cent[idx_invalid] = 0.0 centroid[order] = wv_cent - # Get kernels kernels_o1 = WebbKernel(speckernel_ref.wavelengths, speckernel_ref.kernels, centroid[1], ovs, n_pix) kernels_o2 = WebbKernel(speckernel_ref.wavelengths, speckernel_ref.kernels, centroid[2], ovs, n_pix) @@ -103,24 +115,19 @@ def get_ref_file_args(ref_files, transform): valid_wavemap = (speckernel_wv_range[0] <= wavemap_o2) & (wavemap_o2 <= speckernel_wv_range[1]) wavemap_o2 = np.where(valid_wavemap, wavemap_o2, 0.) - return [wavemap_o1, wavemap_o2], [specprofile_o1, specprofile_o2], [throughput_o1, throughput_o2], [kernels_o1, kernels_o2] + return [wavemap_o1, wavemap_o2], [specprofile_o1, specprofile_o2],\ + [throughput_o1, throughput_o2], [kernels_o1, kernels_o2] -def get_trace_1d(ref_files, transform, order, cols=None): +def get_trace_1d(ref_files, order): """Get the x, y, wavelength of the trace after applying the transform. Parameters ---------- ref_files : dict - A dictionary of the reference file DataModels. 
- transform : array or list - A 3-element list or array describing the rotation and translation - to apply to the reference files in order to match the - observation. + A dictionary of the reference file DataModels, along with values + for subarray and pwcpos, i.e. the pupil wheel position. order : int The spectral order for which to return the trace parameters. - cols : array[int], optional - The columns on the detector for which to compute the trace - parameters. If not given, all columns will be computed. Returns ------- @@ -128,27 +135,35 @@ def get_trace_1d(ref_files, transform, order, cols=None): The x, y and wavelength of the trace. """ - if cols is None: - xtrace = np.arange(2048) + pastasoss_ref = ref_files['pastasoss'] + pad = getattr(pastasoss_ref.traces[0], "padding", 0) + if pad > 0: + do_padding = True else: - xtrace = cols - - spectrace_ref = ref_files['spectrace'] - - # Read x, y, wavelength for the relevant order. - xref = spectrace_ref.trace[order - 1].data['X'] - yref = spectrace_ref.trace[order - 1].data['Y'] - waveref = spectrace_ref.trace[order - 1].data['WAVELENGTH'] - - # Rotate and shift the positions based on transform. - angle, xshift, yshift = transform - xrot, yrot = transform_coords(angle, xshift, yshift, xref, yref) - - # Interpolate y and wavelength to the requested columns. - sort = np.argsort(xrot) - ytrace = np.interp(xtrace, xrot[sort], yrot[sort]) - wavetrace = np.interp(xtrace, xrot[sort], waveref[sort]) - + do_padding = False + + (_, _), (spectrace_o1, spectrace_o2) = \ + get_soss_wavemaps(pastasoss_ref, pwcpos=ref_files['pwcpos'], subarray=ref_files['subarray'], + padding=do_padding, padsize=pad, spectraces=True) + if order == 1: + spectrace = spectrace_o1 + xtrace = np.arange(XTRACE_ORD1_LEN) + elif order == 2: + spectrace = spectrace_o2 + xtrace = np.arange(XTRACE_ORD2_LEN) + else: + errmsg = f"Order {order} is not covered by Pastasoss reference file!" + log.error(errmsg) + raise ValueError(errmsg) + + # CubicSpline requires monotonically increasing x arr + if spectrace[0][0] - spectrace[0][1] > 0: + spectrace = np.flip(spectrace, axis=1) + + trace_interp_y = CubicSpline(spectrace[0], spectrace[1]) + trace_interp_wave = CubicSpline(spectrace[0], spectrace[2]) + ytrace = trace_interp_y(xtrace) + wavetrace = trace_interp_wave(xtrace) return xtrace, ytrace, wavetrace @@ -204,43 +219,7 @@ def estim_flux_first_order(scidata_bkg, scierr, scimask, ref_file_args, mask_tra return estimate_spl -def _mask_wv_map_centroid_outside(wave_maps, ref_files, transform, y_max, orders=(1, 2)): - """Patch to mask wv_map when centroid outside - Parameters - ---------- - wave_maps : array or list - Wavelength maps - ref_files : dict - A dictionary of the reference file DataModels. - transform : array or list - A 3-element list or array describing the rotation and translation - to apply to the reference files in order to match the - observation. - y_max : int - Max value of column to check against centroid location - orders : tuple[int], optional - The spectral orders for each wave_map. If not specified, defaults to (1, 2). - - Returns - ------- - None - Modifies wave_maps in place to mask out values where centroid is off the detector. 
- """ - for wv_map, order in zip(wave_maps, orders): - # Get centroid wavelength and y position as a function of columns - _, y_pos, wv = get_trace_1d(ref_files, transform, order) - # Find min and max wavelengths with centroid inside of detector - wv = wv[np.isfinite(y_pos)] - y_pos = y_pos[np.isfinite(y_pos)] - idx_inside = (0 <= y_pos) & (y_pos <= y_max) - wv = wv[idx_inside] - # Set to zeros (mask) values outside - mask = np.isfinite(wv_map) - mask[mask] = (np.min(wv) > wv_map[mask]) | (wv_map[mask] > np.max(wv)) - wv_map[mask] = 0. - - -def get_native_grid_from_trace(ref_files, transform, spectral_order): +def get_native_grid_from_trace(ref_files, spectral_order): """ Make a 1d-grid of the pixels boundary and ready for ATOCA ExtractionEngine, based on the wavelength solution. @@ -248,22 +227,15 @@ def get_native_grid_from_trace(ref_files, transform, spectral_order): ---------- ref_files: dict A dictionary of the reference file DataModels. - transform: array_like - A 3-elemnt list or array describing the rotation and - translation to apply to the reference files in order to match the - observation. spectral_order: int The spectral order for which to return the trace parameters. - pix_center: bool - If True, use pixel center wavelength value to define the grid. - If False, use pixel boundaries. Default is False. Returns ------- Grid of the pixels boundaries at the native sampling (1d array) """ # From wavelenght solution - col, _, wave = get_trace_1d(ref_files, transform, spectral_order) + col, _, wave = get_trace_1d(ref_files, spectral_order) # Keep only valid solution ... idx_valid = np.isfinite(wave) @@ -284,7 +256,7 @@ def get_native_grid_from_trace(ref_files, transform, spectral_order): return wave, col -def get_grid_from_trace(ref_files, transform, spectral_order, n_os=1): +def get_grid_from_trace(ref_files, spectral_order, n_os=1): """ Make a 1d-grid of the pixels boundary and ready for ATOCA ExtractionEngine, based on the wavelength solution. @@ -292,10 +264,6 @@ def get_grid_from_trace(ref_files, transform, spectral_order, n_os=1): ---------- ref_files: dict A dictionary of the reference file DataModels. - transform: array_like - A 3-elemnt list or array describing the rotation and - translation to apply to the reference files in order to match the - observation. spectral_order: int The spectral order for which to return the trace parameters. Returns @@ -303,7 +271,7 @@ def get_grid_from_trace(ref_files, transform, spectral_order, n_os=1): Grid of the pixels boundaries at the native sampling (1d array) """ - wave, _ = get_native_grid_from_trace(ref_files, transform, spectral_order) + wave, _ = get_native_grid_from_trace(ref_files, spectral_order) # Use pixel boundaries instead of the center values wv_upper_bnd, wv_lower_bnd = get_wave_p_or_m(wave[None, :]) @@ -320,10 +288,10 @@ def get_grid_from_trace(ref_files, transform, spectral_order, n_os=1): return wave_grid -def make_decontamination_grid(ref_files, transform, rtol, max_grid_size, estimate, n_os, wv_range=None): +def make_decontamination_grid(ref_files, rtol, max_grid_size, estimate, n_os, wv_range=None): ''' Create the grid use for the simultaneous extraction of order 1 and 2. 
The grid is made by: - 1) requiring that it satifsfies the oversampling n_os + 1) requiring that it satisfies the oversampling n_os 2) trying to reach the specified tolerance for the spectral range shared between order 1 and 2 3) trying to reach the specified tolerance in the rest of spectral range The max_grid_size overrules steps 2) and 3), so the precision may not be reached if @@ -334,7 +302,7 @@ def make_decontamination_grid(ref_files, transform, rtol, max_grid_size, estimat spectral_orders = [2, 1] grids_ord = dict() for sp_ord in spectral_orders: - grids_ord[sp_ord] = get_grid_from_trace(ref_files, transform, sp_ord, n_os=n_os) + grids_ord[sp_ord] = get_grid_from_trace(ref_files, sp_ord, n_os=n_os) # Build the list of grids given to make_combined_grid. # It must be ordered in increasing priority. @@ -424,7 +392,7 @@ def f_to_spec(f_order, grid_order, ref_file_args, pixel_grid, mask, sp_ord=0): return spec -def _build_tracemodel_order(engine, ref_file_args, f_k, i_order, mask, ref_files, transform): +def _build_tracemodel_order(engine, ref_file_args, f_k, i_order, mask, ref_files): # Take only the order's specific ref_files ref_file_order = [[ref_f[i_order]] for ref_f in ref_file_args] @@ -456,7 +424,7 @@ def _build_tracemodel_order(engine, ref_file_args, f_k, i_order, mask, ref_files tracemodel_ord = model.rebuild(flux_order, fill_value=np.nan) # Build 1d spectrum integrated over pixels - pixel_wave_grid, valid_cols = get_native_grid_from_trace(ref_files, transform, sp_ord) + pixel_wave_grid, valid_cols = get_native_grid_from_trace(ref_files, sp_ord) spec_ord = f_to_spec(flux_order, grid_order, ref_file_order, pixel_wave_grid, np.all(mask, axis=0)[valid_cols], sp_ord=sp_ord) @@ -492,7 +460,7 @@ def _build_null_spec_table(wave_grid): return spec -def model_image(scidata_bkg, scierr, scimask, refmask, ref_files, box_weights, subarray, transform=None, +def model_image(scidata_bkg, scierr, scimask, refmask, ref_files, box_weights, tikfac=None, threshold=1e-4, n_os=2, wave_grid=None, estimate=None, rtol=1e-3, max_grid_size=1000000): """Perform the spectral extraction on a single image. @@ -508,27 +476,21 @@ def model_image(scidata_bkg, scierr, scimask, refmask, ref_files, box_weights, s refmask : array[bool] Pixels that should never be reconstructed e.g. the reference pixels. ref_files : dict - A dictionary of the reference file DataModels. + A dictionary of the reference file DataModels, along with values for + subarray and pwcpos, i.e. the pupil wheel position. box_weights : dict A dictionary of the weights (for each order) used in the box extraction. The weights for each order are 2d arrays with the same size as the detector. - subarray : str - Subarray on which the data were recorded; one of 'SUBSTRIPT96', - 'SUBSTRIP256' or 'FULL'. - transform : array or list, optional - A 3-element list or array describing the rotation and translation to - apply to the reference files in order to match the observation. If not - specified, the transformation is computed. tikfac : float, optional The Tikhonov regularization factor used when solving for the uncontaminated flux. If not specified, the optimal Tikhonov factor is calculated. - n_os : int, optional - The oversampling factor of the wavelength grid used when solving for - the uncontaminated flux. If not specified, defaults to 5. threshold : float The threshold value for using pixels based on the spectral profile. Default value is 1e-4. 
+ n_os : int, optional + The oversampling factor of the wavelength grid used when solving for + the uncontaminated flux. If not specified, defaults to 2. wave_grid : str or SossWaveGridModel or None Filename of reference file or SossWaveGridModel containing the wavelength grid used by ATOCA to model each pixel valid pixel of the detector. If not given, the grid is determined @@ -562,11 +524,13 @@ def model_image(scidata_bkg, scierr, scimask, refmask, ref_files, box_weights, s # Init list of atoca 1d spectra spec_list = [] - # Orders to simulate - order_list = ['Order 1', 'Order 2'] + # Generate list of orders to simulate from pastasoss trace list + order_list = [] + for trace in ref_files['pastasoss'].traces: + order_list.append(f"Order {trace.spectral_order}") # Prepare the reference file arguments. - ref_file_args = get_ref_file_args(ref_files, transform) + ref_file_args = get_ref_file_args(ref_files) # Some error values are 0, we need to mask those pixels for the extraction engine. scimask = scimask | ~(scierr > 0) @@ -588,25 +552,11 @@ def model_image(scidata_bkg, scierr, scimask, refmask, ref_files, box_weights, s # Generate grid based on estimate if not given if wave_grid is None: log.info(f'wave_grid not given: generating grid based on rtol={rtol}') - wave_grid = make_decontamination_grid(ref_files, transform, rtol, max_grid_size, estimate, n_os) + wave_grid = make_decontamination_grid(ref_files, rtol, max_grid_size, estimate, n_os) log.debug(f'wave_grid covering from {wave_grid.min()} to {wave_grid.max()}') else: log.info('Using previously computed or user specified wavelength grid.') -# # Use estimate to evaluate the contribution from each orders to pixels -# # (Used to determine which pixel to model later) -# ref_args_estimate = [ref_arg for ref_arg in ref_file_args] -# # No convolution needed (so give equivalent of identity) -# ref_args_estimate[3] = [np.array([1.]) for _ in order_list] -# engine_for_estimate = ExtractionEngine(*ref_args_estimate, wave_grid=wave_grid, mask_trace_profile=mask_trace_profile) -# models = {order: engine_for_estimate.rebuild(estimate, i_orders=[idx_ord], fill_value=np.nan) -# for idx_ord, order in enumerate(order_list)} -# total = np.nansum([models[order] for order in order_list], axis=0) -# total = np.where((total != 0), total, np.nan) -# contribution = {order: models[order] / total for order in order_list} - - log.debug('Extracting using transformation parameters {}'.format(transform)) - # Set the c_kwargs using the minimum value of the kernels c_kwargs = [{'thresh': webb_ker.min_value} for webb_ker in ref_file_args[3]] @@ -643,7 +593,7 @@ def model_image(scidata_bkg, scierr, scimask, refmask, ref_files, box_weights, s for i_order, order in enumerate(order_list): for idx in range(len(all_tests['factors'])): f_k = all_tests['solution'][idx, :] - args = (engine, ref_file_args, f_k, i_order, global_mask, ref_files, transform) + args = (engine, ref_file_args, f_k, i_order, global_mask, ref_files) _, spec_ord = _build_tracemodel_order(*args) populate_tikho_attr(spec_ord, all_tests, idx, i_order + 1) spec_ord.meta.soss_extract1d.color_range = 'RED' @@ -672,7 +622,7 @@ def model_image(scidata_bkg, scierr, scimask, refmask, ref_files, box_weights, s log.debug('Building the model image of {}.'.format(order)) - args = (engine, ref_file_args, f_k, i_order, global_mask, ref_files, transform) + args = (engine, ref_file_args, f_k, i_order, global_mask, ref_files) tracemodel_ord, spec_ord = _build_tracemodel_order(*args) spec_ord.meta.soss_extract1d.factor = 
tikfac spec_ord.meta.soss_extract1d.color_range = 'RED' @@ -687,7 +637,7 @@ def model_image(scidata_bkg, scierr, scimask, refmask, ref_files, box_weights, s # ############################### # Model remaining part of order 2 # ############################### - if subarray != 'SUBSTRIP96': + if ref_files['subarray'] != 'SUBSTRIP96': idx_order2 = 1 order = idx_order2 + 1 order_str = 'Order 2' @@ -703,17 +653,13 @@ def model_image(scidata_bkg, scierr, scimask, refmask, ref_files, box_weights, s # mask_fit |= already_modeled # Build 1d spectrum integrated over pixels - pixel_wave_grid, valid_cols = get_native_grid_from_trace(ref_files, transform, order) + pixel_wave_grid, valid_cols = get_native_grid_from_trace(ref_files, order) # Hardcode wavelength highest boundary as well. # Must overlap with lower limit in make_decontamination_grid is_in_wv_range = (pixel_wave_grid < 0.95) pixel_wave_grid, valid_cols = pixel_wave_grid[is_in_wv_range], valid_cols[is_in_wv_range] - # NOTE: This code is currently unused. - # Remove order 1 - # scidata_order2_decont = scidata_bkg - tracemodels['Order 1'] - # Range of initial tikhonov factors tikfac_log_range = np.log10(tikfac) + np.array([-2, 8]) @@ -725,7 +671,8 @@ def model_image(scidata_bkg, scierr, scimask, refmask, ref_files, box_weights, s tikfac_log_range=tikfac_log_range) except MaskOverlapError: - log.error('Not enough unmasked pixels to model the remaining part of order 2. Model and spectrum will be NaN in that spectral region.') + log.error('Not enough unmasked pixels to model the remaining part of order 2.' + 'Model and spectrum will be NaN in that spectral region.') spec_ord = [_build_null_spec_table(pixel_wave_grid)] model = np.nan * np.ones_like(scidata_bkg) @@ -745,13 +692,12 @@ def model_image(scidata_bkg, scierr, scimask, refmask, ref_files, box_weights, s return tracemodels, tikfac, logl, wave_grid, spec_list -def compute_box_weights(ref_files, transform, subarray, shape, width=40.): +def compute_box_weights(ref_files, shape, width=40.): - # Which orders to compute (for modeling, different than extraction). - if subarray == 'SUBSTRIP96': - order_list = [1, 2] - else: - order_list = [1, 2, 3] + # Generate list of orders from pastasoss trace list + order_list = [] + for trace in ref_files['pastasoss'].traces: + order_list.append(trace.spectral_order) # Extract each order from order list box_weights = dict() @@ -761,10 +707,10 @@ def compute_box_weights(ref_files, transform, subarray, shape, width=40.): # Order string-name is used more often than integer-name order = order_str[order_integer] - log.debug(f'Compute box weights for order {order}.') + log.debug(f'Compute box weights for {order}.') # Define the box aperture - xtrace, ytrace, wavelengths[order] = get_trace_1d(ref_files, transform, order_integer) + xtrace, ytrace, wavelengths[order] = get_trace_1d(ref_files, order_integer) box_weights[order] = get_box_weights(ytrace, width, shape, cols=xtrace) return box_weights, wavelengths @@ -772,7 +718,6 @@ def compute_box_weights(ref_files, transform, subarray, shape, width=40.): def decontaminate_image(scidata_bkg, tracemodels, subarray): """Perform decontamination of the image based on the trace models""" - # Which orders to extract. if subarray == 'SUBSTRIP96': order_list = [1, 2] @@ -809,7 +754,6 @@ def decontaminate_image(scidata_bkg, tracemodels, subarray): # TODO Add docstring -# TODO Add threshold like in model_image? 
TO use with the rough (but stable) estimate def model_single_order(data_order, err_order, ref_file_args, mask_fit, mask_rebuild, order, wave_grid, valid_cols, save_tiktests=False, tikfac_log_range=None): @@ -914,7 +858,6 @@ def throughput(wavelength): # Remove bad pixels that are not modeled for pixel number -# TODO Update docstring def extract_image(decontaminated_data, scierr, scimask, box_weights, bad_pix='model', tracemodels=None): """Perform the box-extraction on the image, while using the trace model to correct for contamination. @@ -926,16 +869,9 @@ def extract_image(decontaminated_data, scierr, scimask, box_weights, bad_pix='mo The uncertainties corresponding to the detector image. scimask : array[float] Pixel mask to apply to the detector image. - ref_files : dict - A dictionary of the reference file DataModels. - transform : array_like - A 3-element list or array describing the rotation and translation to - apply to the reference files in order to match the observation. - subarray : str - Subarray on which the data were recorded; one of 'SUBSTRIPT96', - 'SUBSTRIP256' or 'FULL'. - width : float - The width of the aperture used to extract the uncontaminated spectrum. + box_weights : dict + A dictionary of the weights (for each order) used in the box extraction. + The weights for each order are 2d arrays with the same size as the detector. bad_pix : str How to handle the bad pixels. Options are 'masking' and 'model'. 'masking' will simply mask the bad pixels, such that the number of pixels @@ -945,7 +881,7 @@ def extract_image(decontaminated_data, scierr, scimask, box_weights, bad_pix='mo Dictionary of the modeled detector images for each order. Returns ------- - wavelengths, fluxes, fluxerrs, npixels, box_weights : dict + fluxes, fluxerrs, npixels : dict Each output is a dictionary, with each extracted order as a key. """ # Init models with an empty dictionary if not given @@ -1015,7 +951,7 @@ def extract_image(decontaminated_data, scierr, scimask, box_weights, bad_pix='mo return fluxes, fluxerrs, npixels -def run_extract1d(input_model, spectrace_ref_name, wavemap_ref_name, +def run_extract1d(input_model, pastasoss_ref_name, specprofile_ref_name, speckernel_ref_name, subarray, soss_filter, soss_kwargs): """Run the spectral extraction on NIRISS SOSS data. @@ -1023,10 +959,8 @@ def run_extract1d(input_model, spectrace_ref_name, wavemap_ref_name, ---------- input_model : DataModel The input DataModel. - spectrace_ref_name : str - Name of the spectrace reference file. - wavemap_ref_name : str - Name of the wavemap reference file. + pastasoss_ref_name : str + Name of the pastasoss reference file. specprofile_ref_name : str Name of the specprofile reference file. speckernel_ref_name : str @@ -1052,25 +986,16 @@ def run_extract1d(input_model, spectrace_ref_name, wavemap_ref_name, order_str_2_int = {f'Order {order}': order for order in [1, 2, 3]} # Read the reference files. 
- spectrace_ref = datamodels.SpecTraceModel(spectrace_ref_name) - wavemap_ref = datamodels.WaveMapModel(wavemap_ref_name) + pastasoss_ref = datamodels.PastasossModel(pastasoss_ref_name) specprofile_ref = datamodels.SpecProfileModel(specprofile_ref_name) speckernel_ref = datamodels.SpecKernelModel(speckernel_ref_name) ref_files = dict() - ref_files['spectrace'] = spectrace_ref - ref_files['wavemap'] = wavemap_ref + ref_files['pastasoss'] = pastasoss_ref ref_files['specprofile'] = specprofile_ref ref_files['speckernel'] = speckernel_ref - - # Initialize the theta, dx, dy transform parameters - transform = soss_kwargs.pop('transform') - if transform is None: - transform = [None, None, None] - else: - transform = [float(val) for val in transform] - # Save names for logging - param_name = np.array(['theta', 'x-offset', 'y-offset']) + ref_files['subarray'] = subarray + ref_files['pwcpos'] = input_model.meta.instrument.pupil_position # Unpack wave_grid if wave_grid_in was specified. wave_grid_in = soss_kwargs['wave_grid_in'] @@ -1167,40 +1092,8 @@ def run_extract1d(input_model, spectrace_ref_name, wavemap_ref_name, scidata_bkg = scidata col_bkg = np.zeros(scidata.shape[1]) - # Determine the theta, dx, dy transform needed to match scidata trace position to ref file position. - if None in transform: - log.info('Solving for the transformation parameters.') - - # Unpack the expected order 1 & 2 positions. - spectrace_ref = ref_files['spectrace'] - xref_o1 = spectrace_ref.trace[0].data['X'] - yref_o1 = spectrace_ref.trace[0].data['Y'] - xref_o2 = spectrace_ref.trace[1].data['X'] - yref_o2 = spectrace_ref.trace[1].data['Y'] - - # Define which parameters to fit - is_fitted = np.array([value is None for value in transform]) - - # Show which parameters are fitted in log - log.info('Parameters used for fit: ' + ', '.join(param_name[is_fitted])) - log.info('Fixed parameters: ' + ', '.join(param_name[~is_fitted])) - - # Use the solver on the background subtracted image. - if subarray == 'SUBSTRIP96' or soss_filter == 'F277W': - # Use only order 1 to solve theta, dx, dy - transform = solve_transform(scidata_bkg, scimask, xref_o1, yref_o1, - soss_filter=soss_filter, is_fitted=is_fitted, - guess_transform=transform) - else: - transform = solve_transform(scidata_bkg, scimask, xref_o1, yref_o1, - xref_o2, yref_o2, is_fitted=is_fitted, - soss_filter=soss_filter, guess_transform=transform) - - string_list = [f'{name}={value}' for name, value in zip(param_name, transform)] - log.info('Measured to Reference trace position transform: ' + ', '.join(string_list)) - # Pre-compute the weights for box extraction (used in modeling and extraction) - args = (ref_files, transform, subarray, scidata_bkg.shape) + args = (ref_files, scidata_bkg.shape) box_weights, wavelengths = compute_box_weights(*args, width=soss_kwargs['width']) # Model the traces based on optics filter configuration (CLEAR or F277W) @@ -1208,7 +1101,6 @@ def run_extract1d(input_model, spectrace_ref_name, wavemap_ref_name, # Model the image. 
kwargs = dict() - kwargs['transform'] = transform kwargs['estimate'] = estimate kwargs['tikfac'] = soss_kwargs['tikfac'] kwargs['max_grid_size'] = soss_kwargs['max_grid_size'] @@ -1217,7 +1109,7 @@ def run_extract1d(input_model, spectrace_ref_name, wavemap_ref_name, kwargs['wave_grid'] = wave_grid kwargs['threshold'] = soss_kwargs['threshold'] - result = model_image(scidata_bkg, scierr, scimask, refmask, ref_files, box_weights, subarray, **kwargs) + result = model_image(scidata_bkg, scierr, scimask, refmask, ref_files, box_weights, **kwargs) tracemodels, soss_kwargs['tikfac'], logl, wave_grid, spec_list = result # Add atoca spectra to multispec for output @@ -1272,19 +1164,18 @@ def run_extract1d(input_model, spectrace_ref_name, wavemap_ref_name, if i == 0: all_box_weights[order] = [] all_box_weights[order].append(box_weights[order]) - # Copy spectral data for each order into the output model. for order in fluxes.keys(): table_size = len(wavelengths[order]) out_table = np.zeros(table_size, dtype=datamodels.SpecModel().spec_table.dtype) - out_table['WAVELENGTH'] = wavelengths[order] - out_table['FLUX'] = fluxes[order] - out_table['FLUX_ERROR'] = fluxerrs[order] + out_table['WAVELENGTH'] = wavelengths[order][:table_size] + out_table['FLUX'] = fluxes[order][:table_size] + out_table['FLUX_ERROR'] = fluxerrs[order][:table_size] out_table['DQ'] = np.zeros(table_size) - out_table['BACKGROUND'] = col_bkg - out_table['NPIXELS'] = npixels[order] + out_table['BACKGROUND'] = col_bkg[:table_size] + out_table['NPIXELS'] = npixels[order][:table_size] spec = datamodels.SpecModel(spec_table=out_table) @@ -1297,9 +1188,6 @@ def run_extract1d(input_model, spectrace_ref_name, wavemap_ref_name, output_model.meta.soss_extract1d.width = soss_kwargs['width'] output_model.meta.soss_extract1d.apply_decontamination = soss_kwargs['atoca'] output_model.meta.soss_extract1d.tikhonov_factor = soss_kwargs['tikfac'] - output_model.meta.soss_extract1d.delta_x = transform[1] - output_model.meta.soss_extract1d.delta_y = transform[2] - output_model.meta.soss_extract1d.theta = transform[0] output_model.meta.soss_extract1d.oversampling = soss_kwargs['n_os'] output_model.meta.soss_extract1d.threshold = soss_kwargs['threshold'] output_model.meta.soss_extract1d.bad_pix = soss_kwargs['bad_pix'] diff --git a/jwst/extract_1d/soss_extract/soss_solver.py b/jwst/extract_1d/soss_extract/soss_solver.py deleted file mode 100644 index 7f1cded6c7..0000000000 --- a/jwst/extract_1d/soss_extract/soss_solver.py +++ /dev/null @@ -1,508 +0,0 @@ -import logging - -import numpy as np -from scipy.ndimage import shift, rotate -from scipy.optimize import minimize -import warnings - -from .soss_syscor import aperture_mask -from .soss_centroids import get_centroids_com - -log = logging.getLogger(__name__) -log.setLevel(logging.DEBUG) - - -def transform_coords(angle, xshift, yshift, xpix, ypix, cenx=1024, ceny=50): - """Apply a rotation and shift to the trace centroids positions. This - assumes that the trace centroids are already in the CV3 coordinate system. - - Parameters - ---------- - angle : float - The angle by which to rotate the coordinates, in degrees. - xshift : float - The shift to apply to the x-coordinates after rotating. - yshift : float - The shift to apply to the y-coordinates after rotating. - xpix : array[float] - The x-coordinates to be transformed. - ypix : array[float] - The y-coordinates to be transformed. - cenx : float (optional) - The x-coordinate around which to rotate. 
- ceny : float (optional) - The y-coordinate around which to rotate. - - Returns - ------- - xrot : array[float] - The rotated and shifted x coordinate. - yrot : array[float] - The rotated and shifted y coordinate. - """ - - # Convert to numpy arrays. - xpix = np.atleast_1d(xpix) - ypix = np.atleast_1d(ypix) - - # Required rotation in the detector frame to match the data. - angle = np.deg2rad(angle) - rot_mat = np.array([[np.cos(angle), -np.sin(angle)], - [np.sin(angle), np.cos(angle)]]) - - # Rotation center set to o1 trace centroid halfway along spectral axis. - points = np.array([xpix - cenx, ypix - ceny]) - rot_points = rot_mat @ points - rot_points[0] += cenx - rot_points[1] += ceny - - # Apply the offsets. - xrot = rot_points[0] + xshift - yrot = rot_points[1] + yshift - - return xrot, yrot - - -def evaluate_model(xmod, transform, xref, yref): - """Evaluate the transformed reference coordinates at particular x-values. - - Parameters - ---------- - xmod : array[float] - The x-values at which to evaluate the transformed coordinates. - transform : array[float] - The transformation parameters. - xref : array[float] - The reference x-positions. - yref : array[float] - The reference y-positions. - - Returns - ------- - ymod : array[float] - The transformed y-coordinates corresponding to xmod. - """ - - angle, xshift, yshift = transform - - # Calculate rotated reference positions. - xrot, yrot = transform_coords(angle, xshift, yshift, xref, yref) - - # After rotation, need to re-sort the x-positions for interpolation. - sort = np.argsort(xrot) - xrot, yrot = xrot[sort], yrot[sort] - - # Interpolate rotated model onto same x scale as data. - ymod = np.interp(xmod, xrot, yrot) - - return ymod - - -def _chi_squared(transform, xref_o1, yref_o1, xref_o2, yref_o2, - xdat_o1, ydat_o1, xdat_o2, ydat_o2): - """Compute the chi-squared statistic for fitting the reference positions - to the true positions. - - Parameters - ---------- - transform : array[float] - The transformation parameters. - xref_o1 : array[float] - The order 1 reference x-positions. - yref_o1 : array[float] - The order 1 reference y-positions. - xref_o2 : array[float] - The order 2 reference x-positions. - yref_o2 : array[float] - The order 2 reference y-positions. - xdat_o1 : array[float] - The order 1 data x-positions. - ydat_o1 : array[float] - The order 1 data y-positions. - xdat_o2 : array[float] - The order 2 data x-positions. - ydat_o2 : array[float] - The order 2 data y-positions. - - Returns - ------- - chisq : float - The chi-squared value of the model fit. - """ - # Interpolate rotated model of first order onto same x scale as data. - ymod_o1 = evaluate_model(xdat_o1, transform, xref_o1, yref_o1) - - # Compute the chi-square. - chisq_o1 = np.nansum((ydat_o1 - ymod_o1)**2) - - # If second order centroids are provided, include them in the calculation. - if xdat_o2 is not None: - # Interpolate rotated model onto same x scale as data. - ymod_o2 = evaluate_model(xdat_o2, transform, xref_o2, yref_o2) - - # Compute the chi-square and add to the first order. - chisq_o2 = np.nansum((ydat_o2 - ymod_o2)**2) - chisq = chisq_o1 + chisq_o2 - # If not, use only the first order. 
-
-
-def solve_transform(scidata_bkg, scimask, xref_o1, yref_o1, xref_o2=None,
-                    yref_o2=None, halfwidth=30., is_fitted=(True, True, True),
-                    soss_filter='CLEAR', guess_transform=(None, None, None),
-                    bounds_theta=(-5., 5.), bounds_x=(-3, 3), bounds_y=(-3., 3.)):
-    """Given a science image, determine the centroids and find the simple
-    transformation (rotation + vertical & horizontal offset, or some combination
-    thereof) needed to match xref_o1 and yref_o1 to the image.
-
-    Parameters
-    ----------
-    scidata_bkg : array[float]
-        A background subtracted image of the SOSS trace.
-    scimask : array[float]
-        A boolean mask of pixels to be excluded.
-    xref_o1 : array[float]
-        A priori expectation of the order 1 trace x-positions.
-    yref_o1 : array[float]
-        A priori expectation of the order 1 trace y-positions.
-    xref_o2 : array[float] (optional)
-        A priori expectation of the order 2 trace x-positions. Providing these
-        will improve the accuracy of the solver.
-    yref_o2 : array[float] (optional)
-        A priori expectation of the order 2 trace y-positions. Providing these
-        will improve the accuracy of the solver.
-    halfwidth : float (optional)
-        Size of the aperture mask used when extracting the trace positions
-        from the data.
-    is_fitted : tuple, list or array [bool]
-        Tuple indicating which parameters are fitted in the transform.
-        It is ordered as (rotation, x-shift, y-shift).
-        Default is (True, True, True), so all parameters are fitted.
-        When False, the value is fixed to the value in `guess_transform`.
-    guess_transform : tuple, list or array
-        Tuple of the initial guess value of each transformation parameter.
-        It is ordered as (rotation, x-shift, y-shift). If set to None,
-        the value 0. is used. Default is (None, None, None).
-    soss_filter : str (optional)
-        Designator for the SOSS filter used in the observation. Either CLEAR
-        or F277W. Setting F277W here will force the x- and y-shifts to be
-        held fixed.
-    bounds_theta : array[float] (optional)
-        Boundaries on the rotation angle to consider in the chi-squared
-        minimization.
-    bounds_x : array[float] (optional)
-        Boundaries on the horizontal offset to consider in the chi-squared
-        minimization.
-    bounds_y : array[float] (optional)
-        Boundaries on the vertical offset to consider in the chi-squared
-        minimization.
-
-    Returns
-    -------
-    simple_transform : array[float]
-        Array containing the angle, x-shift and y-shift needed to match
-        xref_o1 and yref_o1 to the image.
-    """
-    # Convert None to 0. in guess_transform and then convert to array.
-    guess_transform = [val if val is not None else 0. for val in guess_transform]
-    guess_transform = np.array(guess_transform)
-
-    # Convert is_fitted to array.
-    is_fitted = np.array(is_fitted)
-
-    # Start with order 1 centroids as they will be available for all subarrays.
-    # Remove any NaNs used to pad the xref, yref coordinates.
-    mask_o1 = np.isfinite(xref_o1) & np.isfinite(yref_o1)
-    xref_o1 = xref_o1[mask_o1]
-    yref_o1 = yref_o1[mask_o1]
-
-    # Get centroids from data.
-    aper_mask_o1 = aperture_mask(xref_o1, yref_o1, halfwidth, scidata_bkg.shape)
-    mask = aper_mask_o1 | scimask
-    xdat_o1, ydat_o1, _ = get_centroids_com(scidata_bkg, mask=mask,
-                                            poly_order=None)
-
-    # If order 2 centroids are provided, include them in the analysis. The
-    # inclusion of the order 2 centroids will allow for a more accurate
-    # determination of the rotation and offset, as the addition of the second
-    # order provides an anchor in the spatial direction. However, there are
-    # instances (a SUBSTRIP96 or F277W observation, for example) where the
-    # second order is not available. In this case, work only with order 1.
-    if xref_o2 is not None and yref_o2 is not None and (soss_filter == 'CLEAR' or soss_filter == 'FULL'):
-        # Remove any NaNs used to pad the xref, yref coordinates.
-        log.info('Measuring trace position for orders 1 and 2.')
-        mask_o2 = np.isfinite(xref_o2) & np.isfinite(yref_o2)
-        xref_o2 = xref_o2[mask_o2]
-        yref_o2 = yref_o2[mask_o2]
-
-        # Get centroids from data.
-        aper_mask_o2 = aperture_mask(xref_o2, yref_o2, halfwidth, scidata_bkg.shape)
-        mask = aper_mask_o2 | scimask
-        xdat_o2, ydat_o2, _ = get_centroids_com(scidata_bkg, mask=mask,
-                                                poly_order=None)
-
-        # Use only the uncontaminated range between x=800 and x=1700.
-        mask = (xdat_o1 >= 800) & (xdat_o1 <= 1700)
-        xdat_o1 = xdat_o1[mask]
-        ydat_o1 = ydat_o1[mask]
-
-        mask = (xdat_o2 >= 800) & (xdat_o2 <= 1700)
-        xdat_o2 = xdat_o2[mask]
-        ydat_o2 = ydat_o2[mask]
-
-    elif soss_filter == 'F277W':
-        # If the exposure uses the F277W filter, there is no second order, and
-        # first order centroids are only useful at wavelengths greater than 2.5µm.
-        # Restrict centroids to lie within the region lambda >~ 2.5µm, where the
-        # F277W filter response is strong.
-        log.info('Measuring trace position for order 1 spanning the F277W pixels.')
-        mask = (xdat_o1 >= 25) & (xdat_o1 <= 425)
-        xdat_o1 = xdat_o1[mask]
-        ydat_o1 = ydat_o1[mask]
-        # Force the shifts to be held fixed, as there is not enough information
-        # to constrain dx, dy and dtheta simultaneously.
-        is_fitted[1:] = False
-        # Second order centroids are not available.
-        xdat_o2, ydat_o2 = None, None
-
-    else:
-        # If the exposure is SUBSTRIP96 using the CLEAR filter, there is no
-        # order 2. Use the entire first order to enable the maximum possible
-        # positional constraint on the centroids.
-        log.info('Measuring trace position for order 1 only.')
-        xdat_o2, ydat_o2 = None, None
-
-    # Find the simple transformation via a chi-squared minimization of the
-    # extracted and reference centroids. This transformation considers by
-    # default rotation as well as vertical and horizontal offsets.
-
-    # Set up the minimized function depending on the parameters to fit.
-    simple_transform = guess_transform.copy()
-
-    def _chi2_to_fit(values, *args):
-        simple_transform[is_fitted] = values
-        return _chi_squared(simple_transform, *args)
-
-    # Set up the optimization problem.
-    min_args = (xref_o1, yref_o1, xref_o2, yref_o2,
-                xdat_o1, ydat_o1, xdat_o2, ydat_o2)
-
-    # Define the boundaries.
-    bounds = [bounds_theta, bounds_x, bounds_y]
-    # Only keep the ones that are fitted.
-    bounds = [bnds for idx, bnds in enumerate(bounds) if is_fitted[idx]]
-
-    # Find the best-fit transformation.
-    result = minimize(_chi2_to_fit, guess_transform[is_fitted], bounds=bounds,
-                      args=min_args)
-    simple_transform[is_fitted] = result.x
-
-    return simple_transform
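The is_fitted/guess_transform handling above is a generic way of freezing a subset of parameters while scipy.optimize.minimize fits the rest. The same pattern on a toy quadratic (standalone sketch; the objective and values are made up):

    import numpy as np
    from scipy.optimize import minimize

    def objective(params):
        # Toy objective with a known minimum at (1, 2, 3).
        return np.sum((params - np.array([1.0, 2.0, 3.0])) ** 2)

    is_fitted = np.array([True, False, True])  # freeze the second parameter
    params = np.zeros(3)                       # initial guesses; frozen values persist

    def wrapped(values):
        params[is_fitted] = values
        return objective(params)

    result = minimize(wrapped, params[is_fitted], bounds=[(-5, 5), (-5, 5)])
    params[is_fitted] = result.x
    print(params)  # ~[1., 0., 3.]; the frozen parameter keeps its guess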
-
-
-def rotate_image(image, angle, origin):
-    """Rotate an image around a specific pixel.
-
-    Parameters
-    ----------
-    image : array[float]
-        The image to rotate.
-    angle : float
-        The rotation angle in degrees.
-    origin : Tuple, List, Array
-        The x and y pixel position around which to rotate.
-
-    Returns
-    -------
-    image_rot : array[float]
-        The rotated image.
-    """
-
-    # Pad image so we can safely rotate around the origin.
-    padx = [image.shape[1] - origin[0], origin[0]]
-    pady = [image.shape[0] - origin[1], origin[1]]
-    image_pad = np.pad(image, [pady, padx], 'constant')
-
-    # Rotate the image.
-    image_pad_rot = rotate(image_pad, angle, reshape=False)
-
-    # Remove the padding.
-    image_rot = image_pad_rot[pady[0]:-pady[1], padx[0]:-padx[1]]
-
-    return image_rot
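scipy.ndimage.rotate with reshape=False always rotates about the array center, so the padding above is what turns it into a rotation about an arbitrary pixel. A standalone check of that trick (synthetic image; values are illustrative):

    import numpy as np
    from scipy.ndimage import rotate

    image = np.zeros((100, 300))
    image[40, 200] = 1.0                  # one bright pixel
    origin = [150, 30]                    # (x, y) pivot

    # Pad so that `origin` lands at the center of the padded array.
    padx = [image.shape[1] - origin[0], origin[0]]
    pady = [image.shape[0] - origin[1], origin[1]]
    padded = np.pad(image, [pady, padx], 'constant')
    rotated = rotate(padded, 10.0, reshape=False, order=1)
    result = rotated[pady[0]:-pady[1], padx[0]:-padx[1]]

    iy, ix = np.unravel_index(result.argmax(), result.shape)
    # Distance to the pivot is preserved (~51 px before and after rotation).
    print(np.hypot(ix - origin[0], iy - origin[1]))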
-
-
-def transform_image(angle, xshift, yshift, image, cenx=1024, ceny=50):
-    """Apply the transformation found by solve_transform() to a 2D reference
-    map.
-
-    Parameters
-    ----------
-    angle : float
-        The angle by which to rotate the image, in degrees.
-    xshift : float
-        The x-shift to apply in native pixels, will be rounded to the
-        nearest (oversampled) pixel.
-    yshift : float
-        The y-shift to apply in native pixels, will be rounded to the
-        nearest (oversampled) pixel.
-    image : array[float]
-        An image to transform.
-    cenx : float (optional)
-        The x-coordinate around which to rotate.
-    ceny : float (optional)
-        The y-coordinate around which to rotate.
-
-    Returns
-    -------
-    image_rot : array[float]
-        The image, after applying the shift and rotation.
-    """
-
-    # Rotate the image.
-    image_rot = rotate_image(image, angle, [cenx, ceny])
-
-    # Shift the image.
-    image_rot = shift(image_rot, [yshift, xshift])
-
-    return image_rot
-
-
-def apply_transform(simple_transform, ref_map, oversample, pad, native=True):
-    """Apply the calculated rotation and offset to a 2D reference map, and bin
-    the map down to the native size and resolution.
-
-    Parameters
-    ----------
-    simple_transform : Tuple, List, Array
-        The transformation parameters returned by solve_transform().
-    ref_map : array[float]
-        A reference map: e.g., a 2D wavelength map or trace profile map.
-    oversample : int
-        The oversampling factor of the reference map.
-    pad : int
-        The padding (in native pixels) on the reference map.
-    native : bool (optional)
-        If True, bin down to native pixel sizes and remove padding.
-
-    Returns
-    -------
-    trans_map : array[float]
-        The ref_map after having the transformation applied.
-    """
-
-    ovs = oversample
-
-    # Unpack the transformation.
-    angle, xshift, yshift = simple_transform
-
-    # Modify the transformation with the oversampling and padding.
-    xshift = ovs * xshift
-    yshift = ovs * yshift
-    cenx = ovs * (pad + 1024)
-    ceny = ovs * (pad + 50)
-
-    # Apply the transformation to the reference map.
-    if (angle == 0 and xshift == 0 and yshift == 0):
-        trans_map = ref_map
-    else:
-        trans_map = transform_image(-angle, xshift, yshift, ref_map, cenx, ceny)
-
-    if native:
-        if ovs > 1:
-            # Bin the transformed map down to native resolution.
-            nrows, ncols = trans_map.shape
-            trans_map = trans_map.reshape((nrows // ovs), ovs, (ncols // ovs), ovs)
-            trans_map = trans_map.mean(1).mean(-1)
-
-        # Remove the padding.
-        if pad != 0:
-            trans_map = trans_map[pad:-pad, pad:-pad]
-
-    return trans_map
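The native-resolution binning in apply_transform() is the usual reshape-and-average trick for an integer oversampling factor: each ovs-by-ovs block of the fine grid collapses to its mean. A quick standalone numpy illustration:

    import numpy as np

    ovs = 2
    fine = np.arange(16, dtype=float).reshape(4, 4)   # oversampled map
    nrows, ncols = fine.shape
    coarse = fine.reshape(nrows // ovs, ovs, ncols // ovs, ovs).mean(1).mean(-1)
    print(coarse)
    # [[ 2.5  4.5]
    #  [10.5 12.5]]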
-
-
-def transform_wavemap(simple_transform, wavemap, oversample, pad, native=True):
-    """Apply the transformation found by solve_transform() to a 2D reference
-    wavelength map.
-
-    Parameters
-    ----------
-    simple_transform : Tuple, List, Array
-        The transformation parameters returned by solve_transform().
-    wavemap : array[float]
-        A reference 2D wavelength map.
-    oversample : int
-        The oversampling factor of the reference map.
-    pad : int
-        The padding (in native pixels) on the reference map.
-    native : bool (optional)
-        If True, bin down to native pixel sizes and remove padding.
-
-    Returns
-    -------
-    trans_wavemap : array[float]
-        The wavemap after having the transformation applied.
-    """
-
-    # Find the minimum and maximum wavelength of the wavelength map.
-    minval = np.nanmin(wavemap)
-    maxval = np.nanmax(wavemap)
-
-    # Set NaNs to zero to prevent errors when shifting/rotating.
-    mask = np.isnan(wavemap)
-    wavemap = np.where(mask, 0., wavemap)
-
-    # Apply the transformation to the wavelength map.
-    trans_wavemap = apply_transform(simple_transform, wavemap, oversample,
-                                    pad, native=native)
-
-    # Set pixels with interpolation artifacts to zero by enforcing the
-    # original min/max.
-    mask = (trans_wavemap < minval) | (trans_wavemap > maxval)
-    trans_wavemap[mask] = 0
-
-    return trans_wavemap
-
-
-def transform_profile(simple_transform, profile, oversample, pad, native=True,
-                      norm=True):
-    """Apply the transformation found by solve_transform() to a 2D reference
-    trace profile map.
-
-    Parameters
-    ----------
-    simple_transform : Tuple, List, Array
-        The transformation parameters returned by solve_transform().
-    profile : array[float]
-        A reference 2D trace profile map.
-    oversample : int
-        The oversampling factor of the reference map.
-    pad : int
-        The padding (in native pixels) on the reference map.
-    native : bool (optional)
-        If True, bin down to native pixel sizes and remove padding.
-    norm : bool (optional)
-        If True, normalize each column of the trace profile to sum to one.
-
-    Returns
-    -------
-    trans_profile : array[float]
-        The profile after having the transformation applied.
-    """
-
-    # Apply the transformation to the 2D trace map.
-    trans_profile = apply_transform(simple_transform, profile, oversample,
-                                    pad, native=native)
-
-    if norm:
-        # Normalize so that the columns sum to 1.
-        with warnings.catch_warnings():
-            warnings.simplefilter(action="ignore", category=RuntimeWarning)
-            trans_profile = trans_profile / np.nansum(trans_profile, axis=0)
-
-        trans_profile[~np.isfinite(trans_profile)] = 0.
-
-    return trans_profile
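The norm branch above divides each detector column by its nansum; columns that sum to zero produce non-finite entries, which are then zeroed. A standalone sketch of that behavior (np.errstate used here in place of the warnings filter; the array values are made up):

    import numpy as np

    profile = np.array([[1.0, 0.0, np.nan],
                        [3.0, 0.0, 2.0]])
    with np.errstate(invalid='ignore', divide='ignore'):
        normed = profile / np.nansum(profile, axis=0)
    normed[~np.isfinite(normed)] = 0.0
    print(normed)
    # [[0.25 0.   0.  ]
    #  [0.75 0.   1.  ]]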