diff --git a/CHANGES.rst b/CHANGES.rst
index f399ccaf..06dcc3a6 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -4,7 +4,12 @@
 Bug Fixes
 ---------
 
--
+ramp_fitting
+~~~~~~~~~~~~
+
+- Change where the time division occurs during ramp fitting in order to
+  properly handle special cases where the divisor is not the group time,
+  such as when ZEROFRAME data is used and the divisor is the frame time. [#173]
 
 Changes to API
 --------------
diff --git a/src/stcal/ramp_fitting/ols_fit.py b/src/stcal/ramp_fitting/ols_fit.py
index 389ba0ec..1f46c3cd 100644
--- a/src/stcal/ramp_fitting/ols_fit.py
+++ b/src/stcal/ramp_fitting/ols_fit.py
@@ -607,7 +607,12 @@ def find_0th_one_good_group(ramp_data):
     del bad_1_
 
     ramp_data.one_groups_locs = one_group
-    # (NFrames + 1) * TFrame / 2
+
+    # Refer to JP-3242 for derivation.
+    # One Group Time = (TFrame * NFrames * (NFrames + 1)) / (2 * NFrames)
+    # There is an NFrames term in the numerator and denominator, so when
+    # cancelled we get:
+    # One Group Time = (NFrames + 1) * TFrame / 2
     ramp_data.one_groups_time = (ramp_data.nframes + 1) * ramp_data.frame_time / 2
 
 
@@ -656,11 +661,12 @@ def ols_ramp_fit_single(
     # This must be done before the ZEROFRAME replacements to prevent
     # ZEROFRAME replacement being confused for one good group ramps
     # in the 0th group.
-    if ramp_data.groupgap > 0:
+    if ramp_data.nframes > 1:
         find_0th_one_good_group(ramp_data)
 
     if ramp_data.zeroframe is not None:
-        zframe_locs, cnt = utils.use_zeroframe_for_saturated_ramps(ramp_data)
+        zframe_mat, zframe_locs, cnt = utils.use_zeroframe_for_saturated_ramps(ramp_data)
+        ramp_data.zframe_mat = zframe_mat
         ramp_data.zframe_locs = zframe_locs
         ramp_data.cnt = cnt
 
@@ -831,9 +837,6 @@ def ramp_fit_slopes(ramp_data, gain_2d, readnoise_2d, save_opt, weighting):
     gdq_cube_shape : ndarray
         Group DQ dimensions
 
-    effintim : float
-        effective integration time for a single group
-
    f_max_seg : int
         Actual maximum number of segments over all groups and segments
 
@@ -867,22 +870,12 @@ def ramp_fit_slopes(ramp_data, gain_2d, readnoise_2d, save_opt, weighting):
 
     # Get instrument and exposure data
     frame_time = ramp_data.frame_time
-    groupgap = ramp_data.groupgap
-    nframes = ramp_data.nframes
 
     # Get needed sizes and shapes
     n_int, ngroups, nrows, ncols = data.shape
     imshape = (nrows, ncols)
     cubeshape = (ngroups,) + imshape
 
-    # Calculate effective integration time (once EFFINTIM has been populated
-    # and accessible, will use that instead), and other keywords that will
-    # needed if the pedestal calculation is requested. Note 'nframes'
-    # is the number of given by the NFRAMES keyword, and is the number of
-    # frames averaged on-board for a group, i.e., it does not include the
-    # groupgap.
-    effintim = (nframes + groupgap) * frame_time
-
     # Get GROUP DQ and ERR arrays from input file
     gdq_cube = groupdq
     gdq_cube_shape = gdq_cube.shape
@@ -990,6 +983,8 @@ def ramp_fit_slopes(ramp_data, gain_2d, readnoise_2d, save_opt, weighting):
         del ff_sect
         del gdq_sect
 
+    # END LOOP
+
     if pixeldq_sect is not None:
         del pixeldq_sect
 
@@ -998,7 +993,7 @@ def ramp_fit_slopes(ramp_data, gain_2d, readnoise_2d, save_opt, weighting):
     ramp_data.groupdq = groupdq
     ramp_data.pixeldq = inpixeldq
 
-    return max_seg, gdq_cube_shape, effintim, f_max_seg, dq_int, num_seg_per_int,\
+    return max_seg, gdq_cube_shape, f_max_seg, dq_int, num_seg_per_int,\
         sat_0th_group_int, opt_res, pixeldq, inv_var, med_rates
 
 
@@ -1083,7 +1078,7 @@ def ramp_fit_compute_variances(ramp_data, gain_2d, readnoise_2d, fit_slopes_ans)
     max_seg = fit_slopes_ans[0]
     num_seg_per_int = fit_slopes_ans[5]
-    med_rates = fit_slopes_ans[10]
+    med_rates = fit_slopes_ans[9]
 
     var_p3, var_r3, var_p4, var_r4, var_both4, var_both3, \
         inv_var_both4, s_inv_var_p3, s_inv_var_r3, s_inv_var_both3, segs_4 = \
@@ -1283,8 +1278,8 @@ def ramp_fit_overall(
     imshape = (nrows, ncols)
 
     # Unpack intermediate computations from previous steps
-    max_seg, gdq_cube_shape, effintim, f_max_seg, dq_int, num_seg_per_int = fit_slopes_ans[:6]
-    sat_0th_group_int, opt_res, pixeldq, inv_var, med_rates = fit_slopes_ans[6:]
+    max_seg, gdq_cube_shape, f_max_seg, dq_int, num_seg_per_int = fit_slopes_ans[:5]
+    sat_0th_group_int, opt_res, pixeldq, inv_var, med_rates = fit_slopes_ans[5:]
 
     var_p3, var_r3, var_p4, var_r4, var_both4, var_both3 = variances_ans[:6]
     inv_var_both4, s_inv_var_p3, s_inv_var_r3, s_inv_var_both3 = variances_ans[6:]
@@ -1375,7 +1370,7 @@ def ramp_fit_overall(
         opt_res.var_p_seg = var_p4[:, :f_max_seg, :, :]
         opt_res.var_r_seg = var_r4[:, :f_max_seg, :, :]
 
-        opt_info = opt_res.output_optional(effintim)
+        opt_info = opt_res.output_optional(ramp_data.group_time)
     else:
         opt_info = None
 
@@ -1396,7 +1391,7 @@ def ramp_fit_overall(
 
         # Output integration-specific results to separate file
         integ_info = utils.output_integ(
-            ramp_data, slope_int, dq_int, effintim, var_p3, var_r3, var_both3)
+            ramp_data, slope_int, dq_int, var_p3, var_r3, var_both3)
 
         if opt_res is not None:
             del opt_res
@@ -1407,9 +1402,9 @@ def ramp_fit_overall(
     del var_r3
     del var_both3
 
-    # Divide slopes by total (summed over all integrations) effective
-    # integration time to give count rates.
-    c_rates = slope_dataset2 / effintim
+    # The slopes per pixel are now computed, except for unusable data
+    # due to flagging, which is handled below.
+    c_rates = slope_dataset2
 
     # Compress all integrations' DQ arrays to create the 2D PIXELDQ array for
     # primary output
@@ -2784,7 +2779,7 @@ def fit_lines(data, mask_2d, rn_sect, gain_sect, ngroups, weighting, gdq_sect_r,
     if ngroups == 1:  # process all pixels in 1 group/integration dataset
         slope_s, intercept_s, variance_s, sig_intercept_s, sig_slope_s = \
             fit_1_group(slope_s, intercept_s, variance_s, sig_intercept_s,
-                        sig_slope_s, npix, data, c_mask_2d)
+                        sig_slope_s, npix, data, c_mask_2d, ramp_data)
 
         return slope_s, intercept_s, variance_s, sig_intercept_s, sig_slope_s
 
@@ -2808,7 +2803,7 @@ def fit_lines(data, mask_2d, rn_sect, gain_sect, ngroups, weighting, gdq_sect_r,
     if len(wh_pix_1r[0]) > 0:
         slope_s, intercept_s, variance_s, sig_intercept_s, sig_slope_s = \
             fit_single_read(slope_s, intercept_s, variance_s, sig_intercept_s,
-                            sig_slope_s, npix, data, wh_pix_1r)
+                            sig_slope_s, npix, data, wh_pix_1r, ramp_data)
 
     del wh_pix_1r
 
@@ -2818,7 +2813,7 @@ def fit_lines(data, mask_2d, rn_sect, gain_sect, ngroups, weighting, gdq_sect_r,
     slope_s, intercept_s, variance_s, sig_slope_s, sig_intercept_s = \
         fit_double_read(c_mask_2d, wh_pix_2r, data_masked, slope_s, intercept_s,
-                        variance_s, sig_slope_s, sig_intercept_s, rn_sect)
+                        variance_s, sig_slope_s, sig_intercept_s, rn_sect, ramp_data)
 
     del wh_pix_2r
 
@@ -2844,6 +2839,8 @@ def fit_lines(data, mask_2d, rn_sect, gain_sect, ngroups, weighting, gdq_sect_r,
         slope, intercept, sig_slope, sig_intercept = \
             calc_opt_fit(nreads_wtd, sumxx, sumx, sumxy, sumy)
 
+        slope = slope / ramp_data.group_time
+
         variance = sig_slope**2.  # variance due to fit values
 
     elif weighting.lower() == 'unweighted':  # fit using unweighted weighting
@@ -2877,7 +2874,7 @@ def fit_lines(data, mask_2d, rn_sect, gain_sect, ngroups, weighting, gdq_sect_r,
 
 
 def fit_single_read(slope_s, intercept_s, variance_s, sig_intercept_s,
-                    sig_slope_s, npix, data, wh_pix_1r):
+                    sig_slope_s, npix, data, wh_pix_1r, ramp_data):
     """
     For datasets having >2 groups/integrations, for any semiramp in which the
     0th group is good and the 1st group is either SAT or CR, set slope, etc.
@@ -2908,6 +2905,9 @@ def fit_single_read(slope_s, intercept_s, variance_s, sig_intercept_s,
     wh_pix_1r : tuple
         locations of pixels whose only good group is the 0th group
 
+    ramp_data : RampData
+        The ramp data needed for processing, specifically the timing and
+        ZEROFRAME information.
+
     Returns
     -------
     slope_s : ndarray
@@ -2926,7 +2926,24 @@ def fit_single_read(slope_s, intercept_s, variance_s, sig_intercept_s,
         1-D sigma of y-intercepts from fit for data section
     """
     data0_slice = data[0, :, :].reshape(npix)
-    slope_s[wh_pix_1r] = data0_slice[wh_pix_1r]
+
+    # If one_groups_time is defined, use it.
+    if ramp_data.one_groups_time is not None:
+        slope_s[wh_pix_1r] = data0_slice[wh_pix_1r] / ramp_data.one_groups_time
+        timing = ramp_data.one_groups_time
+    else:
+        slope_s[wh_pix_1r] = data0_slice[wh_pix_1r] / ramp_data.group_time
+        timing = ramp_data.group_time
+
+    # Adjust the slope if the ZEROFRAME was used: the time divisor should
+    # be the frame time when the ZEROFRAME supplies the only good group.
+    if ramp_data.zframe_mat is not None:
+        adjustment = timing / ramp_data.frame_time
+        good_0th_mat = np.zeros(data0_slice.shape, dtype=np.uint8)
+        good_0th_mat[wh_pix_1r] = 1
+        zframe = ramp_data.zframe_mat[ramp_data.current_integ, :, :].reshape(npix)
+        adj_mat = good_0th_mat & zframe
+        slope_s[adj_mat == 1] *= adjustment
 
     # The following arrays will have values correctly calculated later; for
     # now they are just place-holders
@@ -2939,7 +2956,7 @@ def fit_single_read(slope_s, intercept_s, variance_s, sig_intercept_s,
 
 
 def fit_double_read(mask_2d, wh_pix_2r, data_masked, slope_s, intercept_s,
-                    variance_s, sig_slope_s, sig_intercept_s, rn_sect):
+                    variance_s, sig_slope_s, sig_intercept_s, rn_sect, ramp_data):
     """
     Process all semi-ramps having exactly 2 good groups. May need to optimize
     later to remove loop over pixels.
@@ -2973,6 +2990,9 @@ def fit_double_read(mask_2d, wh_pix_2r, data_masked, slope_s, intercept_s,
     rn_sect : ndarray
         2-D read noise values for all pixels in data section
 
+    ramp_data : RampData
+        The ramp data needed for processing, specifically the timing values.
+
     Returns
     -------
     slope_s : ndarray
@@ -3003,7 +3023,7 @@ def fit_double_read(mask_2d, wh_pix_2r, data_masked, slope_s, intercept_s,
         data_semi = data_ramp[mask_2d[:, pixel_ff]]  # picks only the 2
         diff_data = data_semi[1] - data_semi[0]
 
-        slope_s[pixel_ff] = diff_data
+        slope_s[pixel_ff] = diff_data / ramp_data.group_time
         intercept_s[pixel_ff] = \
             data_semi[1] * (1. - second_read) + data_semi[0] * second_read  # by geometry
         variance_s[pixel_ff] = 2.0 * rn * rn
@@ -3130,7 +3150,7 @@ def calc_opt_fit(nreads_wtd, sumxx, sumx, sumxy, sumy):
 
 
 def fit_1_group(slope_s, intercept_s, variance_s, sig_intercept_s,
                sig_slope_s, npix, data, mask_2d):
-                sig_slope_s, npix, data, mask_2d):
+                sig_slope_s, npix, data, mask_2d, ramp_data):
     """
     This function sets the fitting arrays for datasets having only 1 group
     per integration.
@@ -3161,6 +3181,9 @@ def fit_1_group(slope_s, intercept_s, variance_s, sig_intercept_s,
     mask_2d : ndarray
         delineates which channels to fit for each pixel, 2-D bool
 
+    ramp_data : RampData
+        The ramp data needed for processing, specifically the timing values.
+
     Returns
     -------
     slope_s : ndarray
@@ -3183,6 +3206,7 @@ def fit_1_group(slope_s, intercept_s, variance_s, sig_intercept_s,
     # time to give the count rate. Recalculate other fit quantities to be
     # benign.
     slope_s = data[0, :, :].reshape(npix)
+    slope_s = slope_s / ramp_data.group_time
 
     # The following arrays will have values correctly calculated later; for
     # now they are just place-holders
@@ -3408,6 +3432,8 @@ def fit_2_group(slope_s, intercept_s, variance_s, sig_intercept_s, sig_slope_s,
         variance_s[one_group_locs] = 1.
         del one_group_locs
 
+    slope_s = slope_s / ramp_data.group_time
+
     return slope_s, intercept_s, variance_s, sig_intercept_s, sig_slope_s
 
 
diff --git a/src/stcal/ramp_fitting/ramp_fit_class.py b/src/stcal/ramp_fitting/ramp_fit_class.py
index 084622b1..1eafe1d0 100644
--- a/src/stcal/ramp_fitting/ramp_fit_class.py
+++ b/src/stcal/ramp_fitting/ramp_fit_class.py
@@ -26,6 +26,7 @@ def __init__(self):
         self.flags_unreliable_slope = None
 
         # ZEROFRAME
+        self.zframe_mat = None
         self.zframe_locs = None
         self.zframe_cnt = 0
         self.zeroframe = None
diff --git a/src/stcal/ramp_fitting/utils.py b/src/stcal/ramp_fitting/utils.py
index ecef1a3a..172e00d2 100644
--- a/src/stcal/ramp_fitting/utils.py
+++ b/src/stcal/ramp_fitting/utils.py
@@ -250,7 +250,7 @@ def shrink_crmag(self, n_int, dq_cube, imshape, nreads, jump_det):
         else:
             self.cr_mag_seg = cr_com[:, :max_num_crs, :, :]
 
-    def output_optional(self, effintim):
+    def output_optional(self, group_time):
         """
         These results are the cosmic ray magnitudes in the
         segment-specific results for the count rates, y-intercept,
@@ -265,7 +265,7 @@ def output_optional(self, effintim):
 
         Parameters
         ----------
-        effintim : float
+        group_time : float
            effective integration time for a single group
 
         Returns
        -------
@@ -284,8 +284,6 @@ def output_optional(self, group_time):
             self.weights[1. / self.weights > LARGE_VARIANCE_THRESHOLD] = 0.
         warnings.resetwarnings()
 
-        self.slope_seg /= effintim
-
         opt_info = (self.slope_seg, self.sigslope_seg, self.var_p_seg,
                     self.var_r_seg, self.yint_seg, self.sigyint_seg,
                     self.ped_int, self.weights, self.cr_mag_seg)
@@ -631,8 +629,8 @@ def calc_slope_vars(ramp_data, rn_sect, gain_sect, gdq_sect, group_time, max_seg
     return den_r3, den_p3, num_r3, segs_beg_3
 
 
-def calc_pedestal(ramp_data, num_int, slope_int, firstf_int, dq_first, nframes,
-                  groupgap, dropframes1):
+def calc_pedestal(ramp_data, num_int, slope_int, firstf_int, dq_first,
+                  nframes, groupgap, dropframes1):
     """
     The pedestal is calculated by extrapolating the final slope for each pixel
     from its value at the first sample in the integration to an exposure time
@@ -671,8 +669,8 @@ def calc_pedestal(ramp_data, num_int, slope_int, firstf_int, dq_first, nframes,
         pedestal image, 2-D float
     """
     ff_all = firstf_int[num_int, :, :].astype(np.float32)
-    ped = ff_all - slope_int[num_int, ::] * \
-        (((nframes + 1.) / 2. + dropframes1) / (nframes + groupgap))
+    tmp = ((nframes + 1.) / 2. + dropframes1) / (nframes + groupgap)
+    ped = ff_all - slope_int[num_int, ::] * tmp
 
     sat_flag = ramp_data.flags_saturated
     ped[np.bitwise_and(dq_first, sat_flag) == sat_flag] = 0
@@ -681,7 +679,7 @@ def calc_pedestal(ramp_data, num_int, slope_int, firstf_int, dq_first, nframes,
     return ped
 
 
-def output_integ(ramp_data, slope_int, dq_int, effintim, var_p3, var_r3, var_both3):
+def output_integ(ramp_data, slope_int, dq_int, var_p3, var_r3, var_both3):
     """
     For the OLS algorithm, construct the output integration-specific results.
     Any variance values that are a large fraction of the default value
@@ -702,9 +700,6 @@ def output_integ(ramp_data, slope_int, dq_int, var_p3, var_r3, var_both3):
     dq_int : ndarray
         Data cube of DQ arrays for each integration, 3-D int
 
-    effintim : float
-        Effective integration time per integration
-
     var_p3 : ndarray
         Cube of integration-specific values for the slope variance due to
         Poisson noise only, 3-D float
@@ -731,7 +726,7 @@ def output_integ(ramp_data, slope_int, dq_int, var_p3, var_r3, var_both3):
     var_r3[var_r3 > LARGE_VARIANCE_THRESHOLD] = 0.
     var_both3[var_both3 > LARGE_VARIANCE_THRESHOLD] = 0.
-    data = slope_int / effintim
+    data = slope_int
 
     invalid_data = ramp_data.flags_saturated | ramp_data.flags_do_not_use
     data[np.bitwise_and(dq_int, invalid_data).astype(bool)] = np.nan
 
@@ -835,6 +830,7 @@ def shift_z(a, off):
 
 def get_efftim_ped(ramp_data):
     """
+    XXX - Work to remove this function.
     Calculate the effective integration time for a single group, and return
     the number of frames per group, and the number of frames dropped between
     groups.
@@ -1592,6 +1588,7 @@ def use_zeroframe_for_saturated_ramps(ramp_data):
     zframe_locs = [None] * nints
     cnt = 0
+    zframe_mat = np.zeros((nints, nrows, ncols), dtype=np.uint8)
 
     for integ in range(nints):
         intdq = dq[integ, :, :, :]
@@ -1614,11 +1611,12 @@ def use_zeroframe_for_saturated_ramps(ramp_data):
                     col_list.append(col)
                     ramp_data.data[integ, 0, row, col] = ramp_data.zeroframe[integ, row, col]
                     ramp_data.groupdq[integ, 0, row, col] = good_flag
+                    zframe_mat[integ, row, col] = 1
                     cnt = cnt + 1
 
         zframe_locs[integ] = (np.array(row_list, dtype=int), np.array(col_list, dtype=int))
 
-    return zframe_locs, cnt
+    return zframe_mat, zframe_locs, cnt
 
 
 def groups_saturated_in_integration(intdq, sat_flag, num_sat_groups):
diff --git a/tests/test_ramp_fitting.py b/tests/test_ramp_fitting.py
index 73eb6300..a50411d4 100644
--- a/tests/test_ramp_fitting.py
+++ b/tests/test_ramp_fitting.py
@@ -474,8 +474,9 @@ def run_one_group_ramp_suppression(nints, suppress):
     ngroups, nrows, ncols = 5, 1, 3
     dims = (nints, ngroups, nrows, ncols)
     rnoise, gain = 10, 1
-    nframes, group_time, frame_time = 1, 5.0, 1
+    nframes, frame_time, groupgap = 1, 1, 0
     var = rnoise, gain
+    group_time = (nframes + groupgap) * frame_time
     tm = nframes, group_time, frame_time
 
     # Using the above create the classes and arrays.
@@ -528,19 +529,19 @@ def test_one_group_ramp_suppressed_one_integration():
 
     # Check slopes information
     sdata, sdq, svp, svr, serr = slopes
-    check = np.array([[np.nan, np.nan, 1.0000002]])
+    check = np.array([[np.nan, np.nan, 1.0000001]])
     np.testing.assert_allclose(sdata, check, tol)
 
     check = np.array([[DNU | SAT, DNU, GOOD]])
     np.testing.assert_allclose(sdq, check, tol)
 
-    check = np.array([[0., 0., 0.01]])
+    check = np.array([[0., 0., 0.25]])
     np.testing.assert_allclose(svp, check, tol)
 
-    check = np.array([[0., 0., 0.19999999]])
+    check = np.array([[0., 0., 4.999999]])
     np.testing.assert_allclose(svr, check, tol)
 
-    check = np.array([[0., 0., 0.45825756]])
+    check = np.array([[0., 0., 2.2912877]])
     np.testing.assert_allclose(serr, check, tol)
 
     # Check slopes information
@@ -552,13 +553,13 @@ def test_one_group_ramp_suppressed_one_integration():
     check = np.array([[[DNU | SAT, DNU, GOOD]]])
     np.testing.assert_allclose(cdq, check, tol)
 
-    check = np.array([[[0., 0., 0.01]]])
+    check = np.array([[[0., 0., 0.25]]])
     np.testing.assert_allclose(cvp, check, tol)
 
-    check = np.array([[[0., 0., 0.19999999]]])
+    check = np.array([[[0., 0., 4.999999]]])
     np.testing.assert_allclose(cvr, check, tol)
 
-    check = np.array([[[0., 0., 0.4582576]]])
+    check = np.array([[[0., 0., 2.291288]]])
     np.testing.assert_allclose(cerr, check, tol)
 
 
@@ -573,19 +574,19 @@ def test_one_group_ramp_not_suppressed_one_integration():
 
     # Check slopes information
     sdata, sdq, svp, svr, serr = slopes
-    check = np.array([[np.nan, 1., 1.0000002]])
+    check = np.array([[np.nan, 1., 1.0000001]])
     np.testing.assert_allclose(sdata, check, tol)
 
     check = np.array([[DNU | SAT, GOOD, GOOD]])
     np.testing.assert_allclose(sdq, check, tol)
 
-    check = np.array([[0., 0.04, 0.01]])
+    check = np.array([[0., 1., 0.25]])
     np.testing.assert_allclose(svp, check, tol)
 
-    check = np.array([[0., 3.9999995, 0.19999999]])
+    check = np.array([[0., 100., 5.0000005]])
     np.testing.assert_allclose(svr, check, tol)
 
-    check = np.array([[0., 2.009975, 0.45825756]])
+    check = np.array([[0., 10.049875, 2.291288]])
     np.testing.assert_allclose(serr, check, tol)
 
     # Check slopes information
     cdata, cdq, cvp, cvr, cerr = cube
@@ -597,13 +598,13 @@ def test_one_group_ramp_not_suppressed_one_integration():
     check = np.array([[[DNU | SAT, GOOD, GOOD]]])
     np.testing.assert_allclose(cdq, check, tol)
 
-    check = np.array([[[0., 0.04, 0.01]]])
+    check = np.array([[[0., 1., 0.25]]])
     np.testing.assert_allclose(cvp, check, tol)
 
-    check = np.array([[[0., 3.9999995, 0.19999999]]])
+    check = np.array([[[0., 100., 5.0000005]]])
     np.testing.assert_allclose(cvr, check, tol)
 
-    check = np.array([[[0., 2.0099752, 0.4582576]]])
+    check = np.array([[[0., 10.049875, 2.291288]]])
     np.testing.assert_allclose(cerr, check, tol)
 
 
@@ -619,25 +620,25 @@ def test_one_group_ramp_suppressed_two_integrations():
 
     # Check slopes information
     sdata, sdq, svp, svr, serr = slopes
-    check = np.array([[1.0000001, 1.0000001, 1.0000002]])
+    check = np.array([[1.0000001, 1.0000001, 1.0000001]])
     np.testing.assert_allclose(sdata, check, tol)
 
     check = np.array([[GOOD, GOOD, GOOD]])
     np.testing.assert_allclose(sdq, check, tol)
 
-    check = np.array([[0.005, 0.01, 0.005]])
+    check = np.array([[0.125, 0.25, 0.125]])
     np.testing.assert_allclose(svp, check, tol)
 
-    check = np.array([[0.19999999, 0.19999999, 0.09999999]])
+    check = np.array([[4.999998, 4.999998, 2.4999995]])
     np.testing.assert_allclose(svr, check, tol)
 
-    check = np.array([[0.45276925, 0.45825756, 0.32403702]])
+    check = np.array([[2.263846, 2.2912874, 1.620185]])
     np.testing.assert_allclose(serr, check, tol)
 
     # Check slopes information
     cdata, cdq, cvp, cvr, cerr = cube
-    check = np.array([[[np.nan, np.nan, 1.0000001]],
+    check = np.array([[[np.nan, np.nan, 1.0000001]],
                       [[1.0000001, 1.0000001, 1.0000001]]])
     np.testing.assert_allclose(cdata, check, tol)
 
     check = np.array([[[DNU | SAT, DNU, GOOD]],
                       [[GOOD, GOOD, GOOD]]])
     np.testing.assert_allclose(cdq, check, tol)
 
-    check = np.array([[[0., 0., 0.01]],
-                      [[0.005, 0.01, 0.01]]])
+    check = np.array([[[0., 0., 0.25]],
+                      [[0.125, 0.25, 0.25]]])
     np.testing.assert_allclose(cvp, check, tol)
 
-    check = np.array([[[0., 0., 0.19999999]],
-                      [[0.19999999, 0.19999999, 0.19999999]]])
+    check = np.array([[[0., 0., 4.999999]],
+                      [[4.999999, 4.999999, 4.999999]]])
     np.testing.assert_allclose(cvr, check, tol)
 
-    check = np.array([[[0., 0., 0.4582576]],
-                      [[0.45276922, 0.4582576, 0.4582576]]])
+    check = np.array([[[0., 0., 2.291288]],
+                      [[2.2638464, 2.291288, 2.291288]]])
     np.testing.assert_allclose(cerr, check, tol)
 
 
@@ -670,25 +671,25 @@ def test_one_group_ramp_not_suppressed_two_integrations():
 
     # Check slopes information
     sdata, sdq, svp, svr, serr = slopes
-    check = np.array([[1.0000001, 1.0000002, 1.0000002]])
+    check = np.array([[1.0000001, 1.0000001, 1.0000001]])
     np.testing.assert_allclose(sdata, check, tol)
 
     check = np.array([[GOOD, GOOD, GOOD]])
     np.testing.assert_allclose(sdq, check, tol)
 
-    check = np.array([[0.005, 0.008, 0.005]])
+    check = np.array([[0.125, 0.2, 0.125]])
     np.testing.assert_allclose(svp, check, tol)
 
-    check = np.array([[0.19999999, 0.19047618, 0.09999999]])
+    check = np.array([[5., 4.7619047, 2.5000002]])
     np.testing.assert_allclose(svr, check, tol)
 
-    check = np.array([[0.45276925, 0.44550666, 0.32403702]])
+    check = np.array([[2.2638464, 2.2275333, 1.6201853]])
     np.testing.assert_allclose(serr, check, tol)
 
     # Check slopes information
     cdata, cdq, cvp, cvr, cerr = cube
-    check = np.array([[[np.nan, 1., 1.0000001]],
+    check = np.array([[[np.nan, 1., 1.0000001]],
                       [[1.0000001, 1.0000001, 1.0000001]]])
     np.testing.assert_allclose(cdata, check, tol)
 
     check = np.array([[[DNU | SAT, GOOD, GOOD]],
                       [[GOOD, GOOD, GOOD]]])
     np.testing.assert_allclose(cdq, check, tol)
 
-    check = np.array([[[0., 0.04, 0.01]],
-                      [[0.005, 0.01, 0.01]]])
+    check = np.array([[[0., 1., 0.25]],
+                      [[0.125, 0.25, 0.25]]])
     np.testing.assert_allclose(cvp, check, tol)
 
-    check = np.array([[[0., 3.9999995, 0.19999999]],
-                      [[0.19999999, 0.19999999, 0.19999999]]])
+    check = np.array([[[0., 100., 5.0000005]],
+                      [[5.0000005, 5.0000005, 5.0000005]]])
     np.testing.assert_allclose(cvr, check, tol)
 
-    check = np.array([[[0., 2.0099752, 0.4582576]],
-                      [[0.45276922, 0.4582576, 0.4582576]]])
+    check = np.array([[[0., 10.049875, 2.291288]],
+                      [[2.2638464, 2.291288, 2.291288]]])
     np.testing.assert_allclose(cerr, check, tol)
 
 
@@ -798,7 +799,7 @@ def test_zeroframe():
 
     # Check slopes information
     sdata, sdq, svp, svr, serr = slopes
-    check = np.array([[32.78594, 18.62891, 23.787909]])
+    check = np.array([[48.965397, 18.628912, 47.863224]])
     np.testing.assert_allclose(sdata, check, tol, tol)
 
     check = np.array([[GOOD, GOOD, GOOD]])
@@ -819,7 +820,7 @@ def test_zeroframe():
 
     # The third pixel in integration zero has good data
     # because the zeroframe has good data, so the ramp
     # is not fully saturated.
-    check = np.array([[[149.0313, np.nan, 130.40239]],
+    check = np.array([[[298.0626, np.nan, 652.01196]],
                       [[18.62891, 18.62891, 18.62891]]])
     np.testing.assert_allclose(cdata, check, tol, tol)
 
@@ -923,7 +924,7 @@ def test_only_good_0th_group():
     # Because the number of groups used in the first two ramps are different
     # the variances are expected to be different, even though the slopes
     # should be the same.
-    check = np.array([[37.257824, 37.257824, 149.0313]])
+    check = np.array([[37.257824, 37.257824, 496.77103]])
     np.testing.assert_allclose(sdata, check, tol, tol)
 
     check = np.array([[GOOD, GOOD, GOOD]])
@@ -1414,6 +1415,10 @@ def print_real_check(real, check):
     print("=" * 80)
 
 
+def print_arr_str(arr):
+    return np.array2string(arr, max_line_width=np.nan, separator=", ")
+
+
 def base_print(label, arr):
     arr_str = np.array2string(arr, max_line_width=np.nan, separator=", ")
     print(label)
@@ -1563,19 +1568,3 @@ def print_all_info(slopes, cube, optional):
     print_slopes(slopes)
     print_integ(cube)
     print_optional(optional)
-
-
-def print_ramp_data(ramp_data):
-    print(DELIM)
-    print_ramp_data_data(ramp_data)
-    print(DELIM)
-    print_ramp_data_dq(ramp_data)
-    print(DELIM)
-
-
-def print_ramp_data_data(ramp_data):
-    base_print("RampData Data:", ramp_data.data)
-
-
-def print_ramp_data_dq(ramp_data):
-    base_print("RampData Data Quality:", ramp_data.groupdq)
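
For reference, below is a minimal sketch (not part of the patch) of the timing selection that the ols_fit.py changes implement. The names one_group_time, single_read_slope, counts, and used_zeroframe are illustrative stand-ins for the RampData attributes and the fit_single_read logic, assuming scalar inputs:

    def one_group_time(nframes, frame_time):
        # From JP-3242: (TFrame * NFrames * (NFrames + 1)) / (2 * NFrames)
        # simplifies to (NFrames + 1) * TFrame / 2.
        return (nframes + 1) * frame_time / 2.0

    def single_read_slope(counts, nframes, frame_time, group_time, used_zeroframe):
        # Ramps whose only good group is the 0th divide by the one-group
        # time when nframes > 1; otherwise they divide by the group time.
        timing = one_group_time(nframes, frame_time) if nframes > 1 else group_time
        slope = counts / timing
        if used_zeroframe:
            # A ZEROFRAME read spans a single frame time, so rescale the
            # divisor from `timing` to `frame_time` (the `*= adjustment`
            # step in fit_single_read).
            slope *= timing / frame_time
        return slope

    # Example: nframes=4, frame_time=10.0 gives a one-group time of 25.0;
    # a ZEROFRAME ramp is instead divided by the 10.0 s frame time.
    print(single_read_slope(100.0, 4, 10.0, 50.0, False))  # 4.0
    print(single_read_slope(100.0, 4, 10.0, 50.0, True))   # 10.0

Dividing each segment's slope by its own time inside the fit, rather than dividing all slopes by a single effintim afterwards, is what lets these special cases use the frame time or the one-group time without a separate correction pass.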