From b0626c2f0472f07033a12aa37a760eadb0e6996f Mon Sep 17 00:00:00 2001 From: Caitlyn Wolf Date: Sun, 21 Jan 2024 21:45:32 -0500 Subject: [PATCH 01/10] enable interpolation during data operations if the q values between two datasets do not match --- sasdata/dataloader/data_info.py | 187 +++++++++++++++++++------------- 1 file changed, 110 insertions(+), 77 deletions(-) diff --git a/sasdata/dataloader/data_info.py b/sasdata/dataloader/data_info.py index 2135928..274d97e 100644 --- a/sasdata/dataloader/data_info.py +++ b/sasdata/dataloader/data_info.py @@ -24,6 +24,7 @@ import copy import numpy as np +from typing import Optional from sasdata.data_util.uncertainty import Uncertainty @@ -52,22 +53,32 @@ class plottable_1D(object): _yaxis = '' _yunit = '' + # operation data + _x_op = None + _y_op = None + _dx_op = None + _dy_op = None + _dxl_op = None + _dxw_op = None + _lam_op = None + _dlam_op = None + def __init__(self, x, y, dx=None, dy=None, dxl=None, dxw=None, lam=None, dlam=None): self.x = np.asarray(x) self.y = np.asarray(y) if dx is not None: - self.dx = np.asarray(dx) + self.dx = np.asarray(dx, dtype=float) if dy is not None: - self.dy = np.asarray(dy) + self.dy = np.asarray(dy, dtype=float) if dxl is not None: - self.dxl = np.asarray(dxl) + self.dxl = np.asarray(dxl, dtype=float) if dxw is not None: - self.dxw = np.asarray(dxw) + self.dxw = np.asarray(dxw, dtype=float) if lam is not None: - self.lam = np.asarray(lam) + self.lam = np.asarray(lam, dtype=float) if dlam is not None: - self.dlam = np.asarray(dlam) + self.dlam = np.asarray(dlam, dtype=float) def xaxis(self, label, unit): """ @@ -785,12 +796,12 @@ def clone_without_data(self, length=0, clone=None): from copy import deepcopy if clone is None or not issubclass(clone.__class__, Data1D): - x = np.zeros(length) - dx = np.zeros(length) - y = np.zeros(length) - dy = np.zeros(length) - lam = np.zeros(length) - dlam = np.zeros(length) + x = np.zeros(length, dype=float) + dx = np.zeros(length, dype=float) + y 
= np.zeros(length, dype=float) + dy = np.zeros(length, dype=float) + lam = np.zeros(length, dype=float) + dlam = np.zeros(length, dype=float) clone = Data1D(x, y, lam=lam, dx=dx, dy=dy, dlam=dlam) clone.title = self.title @@ -828,81 +839,103 @@ def copy_from_datainfo(self, data1d): self.yaxis(data1d._yaxis, data1d._yunit) self.title = data1d.title - def _validity_check(self, other): + def _interpolation_operation(self, other, tolerance: Optional[float] = 0.01): """ - Checks that the data lengths are compatible. - Checks that the x vectors are compatible. - Returns errors vectors equal to original - errors vectors if they were present or vectors - of zeros when none was found. + Checks that x values for two datasets have overlapping ranges for an operation. + If so, _x_op, _y_op, _dx_op, _dy_op, _dxl_op, _dxw_op, _lam, _dlam for both self and other are updated to + values that will be used for the operation. - :param other: other data set for operation - :return: dy for self, dy for other [numpy arrays] - :raise ValueError: when lengths are not compatible + :param other: other data for operation + :param tolerance: acceptable deviation in matching x data points, default 0.01 (equivalent to 1 % deviation) + :raise ValueError: x-ranges of self and other do not overlap """ - dy_other = None + # clear old interpolation arrays from previous operations on the data + # this should probably be done immediately following the operation but placing here for now if isinstance(other, Data1D): - # Check that data lengths are the same - if len(self.x) != len(other.x) or len(self.y) != len(other.y): - msg = "Unable to perform operation: data length are not equal" + # check if ranges of self.x and other.x overlap at all + if np.min(other.x) > np.max(self.x) or np.max(other.x) < np.min(self.x): + msg = "Unable to perform operation: x-ranges do not overlap." 
raise ValueError(msg) - # Here we could also extrapolate between data points - TOLERANCE = 0.01 - for i in range(len(self.x)): - if fabs(self.x[i] - other.x[i]) > self.x[i]*TOLERANCE: - msg = "Incompatible data sets: x-values do not match" - raise ValueError(msg) - - # Check that the other data set has errors, otherwise - # create zero vector - dy_other = other.dy - if other.dy is None or (len(other.dy) != len(other.y)): - dy_other = np.zeros(len(other.y)) - - # Check that we have errors, otherwise create zero vector - dy = self.dy - if self.dy is None or (len(self.dy) != len(self.y)): - dy = np.zeros(len(self.y)) - - return dy, dy_other + # check if data points match (within tolerance) in overlap range of self.x and other.x + # if we start to lose self.x values in the overlap region (i.e., points do not match up) + # this will fail and interpolation of the other dataset is performed + self_overlap = np.abs((self.x[:, None] - other.x[None, :]) / self.x[:, None]).min(axis=1) <= tolerance + self_overlap_pts = np.flatnonzero(self_overlap) + if len(self_overlap_pts) == len(self.x[self_overlap_pts.min():self_overlap_pts.max()+1]): + x_interp = self.x[self_overlap] + interp_mask = self_overlap # mask for self.x to select overlap region between datasets + match_pts = np.abs(x_interp[:, None] - other.x[None, :]).argmin(axis=1) + y_interp = np.copy(other.y)[match_pts] + other_dy = np.zeros(y_interp.size) if other.dy is None else np.copy(other.dy)[match_pts] + other_dx = None if other.dx is None else np.copy(other.dx)[match_pts] + other_dxl = None if other.dxl is None else np.copy(other.dxl)[match_pts] + other_dxw = None if other.dxw is None else np.copy(other.dxw)[match_pts] + # we need to interpolate the data + else: + self_overlap = np.zeros(self.x.size, dtype=bool) + self_overlap[self_overlap_pts.min():self_overlap_pts.max()+1] = True + x_interp = np.copy(self.x)[self_overlap_pts.min():self_overlap_pts.max()+1] + interp_mask = self_overlap + # linear interpolation on a 
log scale + y_interp = np.power(10, np.interp(np.log10(x_interp), np.log10(other.x), np.log10(other.y))) + other_dy = np.zeros(y_interp.size) + # unsure if the following is correct, but setting resolutions to None if data is interpolated + other_dx = None + other_dxl = None + other_dxw = None + + other._x_op = x_interp + other._y_op = y_interp + other._dy_op = other_dy + other._dx_op = other_dx + other._dxl_op = other_dxl + other._dxw_op = other_dxw + # make sure other parameters are cleared from previous operations + other._lam_op = None + other._dlam_op = None + else: + # other is something besides Data1D and so all points in self should be used for operation + # don't mess with the other parameters since it's not Data1D + interp_mask = np.ones(self.x.size, dtype=bool) + + # update operation parameters of self + self._x_op = self.x[interp_mask] + self._y_op = self.y[interp_mask] + self._dy_op = self.dy[interp_mask] if self.dy is not None else np.zeros(self._y_op.size, dtype=float) + self._dx_op = self.dx[interp_mask] if self.dx is not None else None + self._dxl_op = self.dxl[interp_mask] if self.dxl is not None else None + self._dxw_op = self.dxw[interp_mask] if self.dxw is not None else None + self._lam_op = self.lam[interp_mask] if self.lam is not None else None + self._dlam_op = self.dlam[interp_mask] if self.dlam is not None else None def _perform_operation(self, other, operation): """ """ - # First, check the data compatibility - dy, dy_other = self._validity_check(other) - result = self.clone_without_data(len(self.x)) - if self.dxw is None: - result.dxw = None - else: - result.dxw = np.zeros(len(self.x)) - if self.dxl is None: - result.dxl = None - else: - result.dxl = np.zeros(len(self.x)) - - for i in range(len(self.x)): - result.x[i] = self.x[i] - if self.dx is not None and len(self.x) == len(self.dx): - result.dx[i] = self.dx[i] - if self.dxw is not None and len(self.x) == len(self.dxw): - result.dxw[i] = self.dxw[i] - if self.dxl is not None and 
len(self.x) == len(self.dxl): - result.dxl[i] = self.dxl[i] - - a = Uncertainty(self.y[i], dy[i]**2) + # Check for compatibility of the x-ranges and populate the data used for the operation + # interpolation will be implemented on the 'other' dataset as needed + self._interpolation_operation(other) + + result = self.clone_without_data(self._x_op.size) + result.x = np.copy(self._x_op) + # result.y is initialized as arrays of zero with length of _x_op + # result.dy is initialized as arrays of zero with length of _x_op + result.dx = None if self._dx_op is None else np.copy(self._dx_op) + result.dxl = None if self._dxl_op is None else np.copy(self._dxl_op) + result.dxw = None if self._dxw_op is None else np.copy(self._dxw_op) + result.lam = None if self._lam_op is None else np.copy(self._lam_op) + result.dlam = None if self._dlam_op is None else np.copy(self._dlam_op) + + for i in range(result.x.size): + + a = Uncertainty(self._y_op[i], self._dy_op[i]**2) if isinstance(other, Data1D): - b = Uncertainty(other.y[i], dy_other[i]**2) - if other.dx is not None: - result.dx[i] *= self.dx[i] - result.dx[i] += (other.dx[i]**2) - result.dx[i] /= 2 - result.dx[i] = math.sqrt(result.dx[i]) - if result.dxl is not None and other.dxl is not None: - result.dxl[i] *= self.dxl[i] - result.dxl[i] += (other.dxl[i]**2) - result.dxl[i] /= 2 - result.dxl[i] = math.sqrt(result.dxl[i]) + b = Uncertainty(other._y_op[i], other._dy_op[i]**2) + if result.dx is not None and other._dx_op is not None: + result.dx[i] = math.sqrt((self._dx_op[i]**2 + other._dx_op[i]**2) / 2) + if result.dxl is not None and other._dxl_op is not None: + result.dxl[i] = math.sqrt((self._dxl_op[i]**2 + other._dxl_op[i]**2) / 2) + if result.dxw is not None and other._dxw_op is not None: + result.dxw[i] = math.sqrt((self._dxw_op[i]**2 + other._dxw_op[i]**2) / 2) else: b = other From 0d6d8e6dc5094c906e5c6487c2d77afb948ada72 Mon Sep 17 00:00:00 2001 From: Caitlyn Wolf Date: Sun, 21 Jan 2024 23:00:51 -0500 Subject: [PATCH 
02/10] adding test functions for interpolation of operations --- test/sasdataloader/utest_data_info.py | 207 ++++++++++++++++++++++++++ 1 file changed, 207 insertions(+) create mode 100644 test/sasdataloader/utest_data_info.py diff --git a/test/sasdataloader/utest_data_info.py b/test/sasdataloader/utest_data_info.py new file mode 100644 index 0000000..9484664 --- /dev/null +++ b/test/sasdataloader/utest_data_info.py @@ -0,0 +1,207 @@ +import unittest + +import numpy as np +from numpy.testing import assert_allclose, assert_equal + +from sasdata.dataloader.data_info import Data1D +from sasdata.data_util.uncertainty import Uncertainty + +RTOL = 1e-12 + + +class Data1DTests(unittest.TestCase): + """ + This testing class for plottable_1D is incomplete. + Creating class to test _perform_operation and _interpolation_operation only. CMW 1-21-2024 + """ + + def test_interpolation_operation(self): + """ + Test whether the operation check and interpolation is performed correctly. + """ + + # test x2 range within x1 range + data1 = Data1D(x=[1, 2, 3, 4], y=[2, 3, 4, 5]) + data2 = Data1D(x=[2, 3], y=[0.2, 0.5]) + data1._interpolation_operation(data2) + assert_allclose(np.array([2., 3.]), data1._x_op, RTOL) + assert_allclose(np.array([2., 3.]), data2._x_op, RTOL) + assert_allclose(np.array([3., 4.]), data1._y_op, RTOL) + assert_allclose(np.array([0.2, 0.5]), data2._y_op, RTOL) + + # test x1 range within x2 range + data1 = Data1D(x=[2, 3], y=[0.2, 0.5]) + data2 = Data1D(x=[1, 2, 3, 4], y=[2, 3, 4, 5]) + data1._interpolation_operation(data2) + assert_allclose(np.array([2., 3.]), data1._x_op, RTOL) + assert_allclose(np.array([2., 3.]), data2._x_op, RTOL) + assert_allclose(np.array([0.2, 0.5]), data1._y_op, RTOL) + assert_allclose(np.array([3., 4.]), data2._y_op, RTOL) + + # test overlap of x2 at high x1 + data1 = Data1D(x=[1, 2, 3, 4], y=[2, 3, 4, 5]) + data2 = Data1D(x=[3, 4, 5], y=[0.2, 0.5, 0.7]) + data1._interpolation_operation(data2) + assert_allclose(np.array([3., 4.]), 
data1._x_op, RTOL) + assert_allclose(np.array([3., 4.]), data2._x_op, RTOL) + assert_allclose(np.array([4., 5.]), data1._y_op, RTOL) + assert_allclose(np.array([0.2, 0.5]), data2._y_op, RTOL) + + # test overlap of x2 at low x1 + data1 = Data1D(x=[1, 2, 3, 4], y=[2, 3, 4, 5]) + data2 = Data1D(x=[0.2, 1, 2], y=[0.2, 0.5, 0.7]) + data1._interpolation_operation(data2) + assert_allclose(np.array([1., 2.]), data1._x_op, RTOL) + assert_allclose(np.array([1., 2.]), data2._x_op, RTOL) + assert_allclose(np.array([2., 3.]), data1._y_op, RTOL) + assert_allclose(np.array([0.5, 0.7]), data2._y_op, RTOL) + + # test equal x1 and x 2 + data1 = Data1D(x=[1, 2, 3, 4], y=[2, 3, 4, 5]) + data2 = Data1D(x=[1, 2, 3, 4], y=[0.2, 0.3, 0.4, 0.5]) + data1._interpolation_operation(data2) + assert_allclose(np.array([1., 2., 3., 4.]), data1._x_op, RTOL) + assert_allclose(np.array([1., 2., 3., 4.]), data2._x_op, RTOL) + assert_allclose(np.array([2., 3., 4., 5.]), data1._y_op, RTOL) + assert_allclose(np.array([0.2, 0.3, 0.4, 0.5]), data2._y_op, RTOL) + + # check once that these are all 0 or None if not supplied in original datasets + assert_equal(data1._dy_op, 0) + self.assertIsNone(data1._dx_op) + self.assertIsNone(data1._dxl_op) + self.assertIsNone(data1._dxw_op) + self.assertIsNone(data1._lam_op) + self.assertIsNone(data1._dlam_op) + assert_equal(data2._dy_op, 0) + self.assertIsNone(data2._dx_op) + self.assertIsNone(data2._dxl_op) + self.assertIsNone(data2._dxw_op) + self.assertIsNone(data2._lam_op) + self.assertIsNone(data2._dlam_op) + + # test tolerance + data1 = Data1D(x=[1, 2, 3, 4, 5], y=[2, 3, 4, 5, 6]) + data2 = Data1D(x=[1, 2.19999, 3, 4.2, 5.6, 6], y=[0.2, 0.3, 0.4, 0.5, 0.6, 0.7]) + data1._interpolation_operation(data2, tolerance=0.1) + assert_allclose(np.array([1., 2., 3., 4.]), data1._x_op, RTOL) + assert_allclose(np.array([1., 2., 3., 4.]), data2._x_op, RTOL) + assert_allclose(np.array([2, 3, 4., 5.]), data1._y_op, RTOL) + assert_allclose(np.array([0.2, 0.3, 0.4, 0.5]), 
data2._y_op, RTOL) + + # test interpolation + data1 = Data1D(x=[1, 2, 3, 4, 5], y=[2, 3, 4, 5, 6]) + data2 = Data1D(x=[2, 2.5, 3.5, 5], y=[0.4, 0.5, 0.6, 0.7]) + data1._interpolation_operation(data2) + assert_allclose(np.array([2., 3., 4., 5.]), data1._x_op, RTOL) + assert_allclose(np.array([2., 3., 4., 5.]), data2._x_op, RTOL) + assert_allclose(np.array([3., 4., 5., 6.]), data1._y_op, RTOL) + assert_allclose(np.array([0.4, 0.5519189701334538, 0.6356450684803129, 0.7]), data2._y_op, RTOL) + + # check these are copied over appropriately with no interpolation + # test overlap of x2 at low x1 + data1 = Data1D(x=[1, 2, 3, 4], + y=[2, 3, 4, 5], + dy=[0.02, 0.03, 0.04, 0.05], + dx=[0.01, 0.02, 0.03, 0.04], + lam=[10, 11, 12, 13], + dlam=[0.1, 0.11, 0.12, 0.13]) + data1.dxl = np.array([0.1, 0.2, 0.3, 0.4]) + data1.dxw = np.array([0.4, 0.3, 0.2, 0.4]) + data2 = Data1D(x=[0.2, 1, 2], + y=[0.2, 0.5, 0.7], + dy=[0.002, 0.005, 0.007], + dx=[0.002, 0.01, 0.02], + lam=[13, 12, 11], + dlam=[0.13, 0.12, 0.11]) + data2.dxl = np.array([0.5, 0.6, 0.7]) + data2.dxw = np.array([0.7, 0.6, 0.5]) + data1._interpolation_operation(data2) + + assert_allclose(np.array([0.02, 0.03]), data1._dy_op, RTOL) + assert_allclose(np.array([0.01, 0.02]), data1._dx_op, RTOL) + assert_allclose(np.array([10, 11]), data1._lam_op, RTOL) + assert_allclose(np.array([0.1, 0.11]), data1._dlam_op, RTOL) + assert_allclose(np.array([0.1, 0.2]), data1._dxl_op, RTOL) + assert_allclose(np.array([0.4, 0.3]), data1._dxw_op, RTOL) + + assert_allclose(np.array([0.005, 0.007]), data2._dy_op, RTOL) + assert_allclose(np.array([0.01, 0.02]), data2._dx_op, RTOL) + self.assertIsNone(data2._lam_op) # does not get translated to the resulting dataset from operations + self.assertIsNone(data2._dlam_op) # does not get transferred to the resulting dataset from operations + assert_allclose(np.array([0.6, 0.7]), data2._dxl_op, RTOL) + assert_allclose(np.array([0.6, 0.5]), data2._dxw_op, RTOL) + + # check these are copied over 
appropriately with interpolation + # test overlap of x2 at low x1 + data1 = Data1D(x=[1, 1.5, 2, 3], + y=[2, 3, 4, 5], + dy=[0.02, 0.03, 0.04, 0.05], + dx=[0.01, 0.02, 0.03, 0.04], + lam=[10, 11, 12, 13], + dlam=[0.1, 0.11, 0.12, 0.13]) + data1.dxl = np.array([0.1, 0.2, 0.3, 0.4]) + data1.dxw = np.array([0.4, 0.3, 0.2, 0.4]) + data2 = Data1D(x=[0.2, 1, 2], + y=[0.2, 0.5, 0.7], + dy=[0.002, 0.005, 0.007], + dx=[0.002, 0.01, 0.02], + lam=[13, 12, 11], + dlam=[0.13, 0.12, 0.11]) + data2.dxl = np.array([0.5, 0.6, 0.7]) + data2.dxw = np.array([0.7, 0.6, 0.5]) + data1._interpolation_operation(data2) + + assert_allclose(np.array([0.02, 0.03, 0.04]), data1._dy_op, RTOL) + assert_allclose(np.array([0.01, 0.02, 0.03]), data1._dx_op, RTOL) + assert_allclose(np.array([10, 11, 12]), data1._lam_op, RTOL) + assert_allclose(np.array([0.1, 0.11, 0.12]), data1._dlam_op, RTOL) + assert_allclose(np.array([0.1, 0.2, 0.3]), data1._dxl_op, RTOL) + assert_allclose(np.array([0.4, 0.3, 0.2]), data1._dxw_op, RTOL) + + assert_equal(0, data2._dy_op) + self.assertIsNone(data2._dx_op) + self.assertIsNone(data2._lam_op) + self.assertIsNone(data2._dlam_op) + self.assertIsNone(data2._dxl_op) + self.assertIsNone(data2._dxw_op) + + def test_perform_operation(self): + """ + Test that the operation is performed correctly for two datasets. 
+ """ + def operation(a, b): + return a - b + + data1 = Data1D(x=[1, 2, 3, 4], + y=[2, 3, 4, 5], + dy=[0.02, 0.03, 0.04, 0.05], + dx=[0.01, 0.02, 0.03, 0.04], + lam=[10, 11, 12, 13], + dlam=[0.1, 0.11, 0.12, 0.13]) + data1.dxl = np.array([0.1, 0.2, 0.3, 0.4]) + data1.dxw = np.array([0.4, 0.3, 0.2, 0.4]) + data2 = Data1D(x=[0.2, 1, 2], + y=[0.2, 0.5, 0.7], + dy=[0.002, 0.005, 0.007], + dx=[0.002, 0.01, 0.03], + lam=[13, 12, 11], + dlam=[0.13, 0.12, 0.11]) + data2.dxl = np.array([0.5, 0.6, 0.7]) + data2.dxw = np.array([0.7, 0.6, 0.5]) + result = data1._perform_operation(data2, operation) + + assert_allclose(np.array([1., 2.]), result.x, RTOL) + assert_allclose(np.array([1.5, 2.3]), result.y, RTOL) + # determined target values using Uncertainty + assert_allclose(np.array([0.000425, 0.000949]), result.dy, RTOL) + assert_equal(result.lam, data1._lam_op) + assert_equal(result.dlam, data1._dlam_op) + assert_allclose(np.array([0.01, 0.0254951]), result.dx, RTOL) + assert_allclose(np.array([0.43011626, 0.51478151]), result.dxl, RTOL) + assert_allclose(np.array([0.50990195, 0.41231056]), result.dxw, RTOL) + + +if __name__ == '__main__': + unittest.main() + + From 5c090d562d0abfeafa1d8f33c3e9137f7fe91775 Mon Sep 17 00:00:00 2001 From: Caitlyn Wolf Date: Mon, 22 Jan 2024 00:07:09 -0500 Subject: [PATCH 03/10] fixed numpy version issue with dtype argument for zeros --- sasdata/dataloader/data_info.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/sasdata/dataloader/data_info.py b/sasdata/dataloader/data_info.py index 274d97e..b56a632 100644 --- a/sasdata/dataloader/data_info.py +++ b/sasdata/dataloader/data_info.py @@ -796,12 +796,12 @@ def clone_without_data(self, length=0, clone=None): from copy import deepcopy if clone is None or not issubclass(clone.__class__, Data1D): - x = np.zeros(length, dype=float) - dx = np.zeros(length, dype=float) - y = np.zeros(length, dype=float) - dy = np.zeros(length, dype=float) - lam = np.zeros(length, 
dype=float) - dlam = np.zeros(length, dype=float) + x = np.zeros(length) + dx = np.zeros(length) + y = np.zeros(length) + dy = np.zeros(length) + lam = np.zeros(length) + dlam = np.zeros(length) clone = Data1D(x, y, lam=lam, dx=dx, dy=dy, dlam=dlam) clone.title = self.title From b635bfd246e224a97c55a255efb2f5f01e7a055a Mon Sep 17 00:00:00 2001 From: Caitlyn Wolf Date: Mon, 22 Jan 2024 00:56:07 -0500 Subject: [PATCH 04/10] fixed failing unit test for the interpolation of data operations --- test/sasdataloader/utest_data_info.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/test/sasdataloader/utest_data_info.py b/test/sasdataloader/utest_data_info.py index 9484664..7d312ad 100644 --- a/test/sasdataloader/utest_data_info.py +++ b/test/sasdataloader/utest_data_info.py @@ -192,13 +192,16 @@ def operation(a, b): assert_allclose(np.array([1., 2.]), result.x, RTOL) assert_allclose(np.array([1.5, 2.3]), result.y, RTOL) - # determined target values using Uncertainty - assert_allclose(np.array([0.000425, 0.000949]), result.dy, RTOL) + # determine target values using Uncertainty (not a check for correctness of Uncertainty) + u1 = Uncertainty(np.array([3, 4]), np.array([0.02**2, 0.03**2])) + u2 = Uncertainty(np.array([0.5, 0.7]), np.array([0.005**2, 0.007**2])) + u3 = u1-u2 + assert_allclose(np.sqrt(np.abs(u3.variance)), result.dy, RTOL) assert_equal(result.lam, data1._lam_op) assert_equal(result.dlam, data1._dlam_op) - assert_allclose(np.array([0.01, 0.0254951]), result.dx, RTOL) - assert_allclose(np.array([0.43011626, 0.51478151]), result.dxl, RTOL) - assert_allclose(np.array([0.50990195, 0.41231056]), result.dxw, RTOL) + assert_allclose(np.sqrt(np.abs((data1._dx_op**2+data2._dx_op**2)/2)), result.dx, RTOL) + assert_allclose(np.sqrt(np.abs((data1._dxl_op**2+data2._dxl_op**2)/2)), result.dxl, RTOL) + assert_allclose(np.sqrt(np.abs((data1._dxw_op**2+data2._dxw_op**2)/2)), result.dxw, RTOL) if __name__ == '__main__': From 
f39d0ba56e4b2d2454ba34e5a639dad6413acc6a Mon Sep 17 00:00:00 2001 From: Caitlyn Wolf Date: Mon, 22 Jan 2024 14:54:08 -0500 Subject: [PATCH 05/10] cleaning up interpolation code and creating missing unit tests --- sasdata/data_util/interpolations.py | 77 +++++++++ sasdata/dataloader/data_info.py | 156 +++++++++--------- test/sasdataloader/utest_data_info.py | 140 ++++++++-------- test/sasmanipulations/utest_interpolations.py | 85 ++++++++++ 4 files changed, 312 insertions(+), 146 deletions(-) create mode 100644 sasdata/data_util/interpolations.py create mode 100644 test/sasmanipulations/utest_interpolations.py diff --git a/sasdata/data_util/interpolations.py b/sasdata/data_util/interpolations.py new file mode 100644 index 0000000..5a35d1e --- /dev/null +++ b/sasdata/data_util/interpolations.py @@ -0,0 +1,77 @@ +""" +Interpolation functions for 1d data sets. +""" + +import numpy as np +from numpy.typing import ArrayLike +from typing import Optional, Union + + +def linear(x_interp: ArrayLike, x: ArrayLike, y: ArrayLike, dy: Optional[ArrayLike] = None)\ + -> tuple[np.ndarray, Union[np.ndarray, None]]: + """ + Computes linear interpolation of dataset (x, y) at the points x_interp. + Error propagation is implemented when dy is provided. 
+ Requires that min(x) <= x_interp <= max(x) + + TODO: reductus package has a similar function in err1d if we add the additional dependency + """ + x_interp = np.array(x_interp) + sort = np.argsort(x) + x = np.array(x)[sort] + y = np.array(y)[sort] + dy = np.array(dy)[sort] if dy is not None else None + + # find out where the interpolated points fit into the existing data + index_2 = np.searchsorted(x, x_interp) + index_1 = index_2 - 1 + + # linear interpolation of new y points + y_interp_1 = y[index_1] * (x_interp - x[index_2]) / (x[index_1] - x[index_2]) + y_interp_2 = y[index_2] * (x_interp - x[index_1]) / (x[index_2] - x[index_1]) + y_interp = y_interp_1 + y_interp_2 + + # error propagation + if dy is not None: + dy_interp_1 = dy[index_1] ** 2 * ((x_interp - x[index_2]) / (x[index_1] - x[index_2])) ** 2 + dy_interp_2 = dy[index_2] ** 2 * ((x_interp - x[index_1]) / (x[index_2] - x[index_1])) ** 2 + dy_interp = np.sqrt(dy_interp_1 + dy_interp_2) + else: + dy_interp = None + + return y_interp, dy_interp + + +def linear_scales(x_interp: ArrayLike, + x: ArrayLike, + y: ArrayLike, + dy: Optional[ArrayLike] = None, + scale: Optional[str] = "linear") -> tuple[np.ndarray, Union[np.ndarray, None]]: + """ + Perform linear interpolation on different scales. + Error propagation is implemented when dy is provided. + + Scale is set to "linear" by default. + Setting scale to "log" will perform the interpolation of (log(x), log(y)) at log(x_interp); log(y_interp) will be + converted back to y_interp in the return. 
+ + Returns (y_interp, dy_interp | None) + """ + x = np.array(x) + y = np.array(y) + + if scale == "linear": + result = linear(x_interp=x_interp, x=x, y=y, dy=dy) + return result + + elif scale == "log": + dy = np.array(dy) / y if dy is not None else None + x_interp = np.log(x_interp) + x = np.log(x) + y = np.log(y) + result = linear(x_interp=x_interp, x=x, y=y, dy=dy) + y_interp = np.exp(result[0]) + dy_interp = None if result[1] is None else y_interp * result[1] + return y_interp, dy_interp + + diff --git a/sasdata/dataloader/data_info.py b/sasdata/dataloader/data_info.py index b56a632..344b49f 100644 --- a/sasdata/dataloader/data_info.py +++ b/sasdata/dataloader/data_info.py @@ -27,6 +27,7 @@ from typing import Optional from sasdata.data_util.uncertainty import Uncertainty +from sasdata.data_util import interpolations class plottable_1D(object): @@ -54,14 +55,7 @@ class plottable_1D(object): _yunit = '' # operation data - _x_op = None - _y_op = None - _dx_op = None - _dy_op = None - _dxl_op = None - _dxw_op = None - _lam_op = None - _dlam_op = None + _operation = None # Data1D object that stores points used for operations def __init__(self, x, y, dx=None, dy=None, dxl=None, dxw=None, lam=None, dlam=None): @@ -839,7 +833,7 @@ def copy_from_datainfo(self, data1d): self.yaxis(data1d._yaxis, data1d._yunit) self.title = data1d.title - def _interpolation_operation(self, other, tolerance: Optional[float] = 0.01): + def _interpolation_operation(self, other, tolerance: Optional[float] = 0.01, scale: Optional[str] = 'log'): """ Checks that x values for two datasets have overlapping ranges for an operation. 
If so, _x_op, _y_op, _dx_op, _dy_op, _dxl_op, _dxw_op, _lam, _dlam for both self and other are updated to @@ -847,66 +841,74 @@ def _interpolation_operation(self, other, tolerance: Optional[float] = 0.01): :param other: other data for operation :param tolerance: acceptable deviation in matching x data points, default 0.01 (equivalent to 1 % deviation) + :param scale: default 'log', performs interpolation on log scale; use 'linear' for sesans data :raise ValueError: x-ranges of self and other do not overlap """ - # clear old interpolation arrays from previous operations on the data - # this should probably be done immediately following the operation but placing here for now if isinstance(other, Data1D): # check if ranges of self.x and other.x overlap at all if np.min(other.x) > np.max(self.x) or np.max(other.x) < np.min(self.x): - msg = "Unable to perform operation: x-ranges do not overlap." + msg = "Unable to perform operation: x ranges of two datasets do not overlap." raise ValueError(msg) - # check if data points match (within tolerance) in overlap range of self.x and other.x - # if we start to lose self.x values in the overlap region (i.e., points do not match up) - # this will fail and interpolation of the other dataset is performed - self_overlap = np.abs((self.x[:, None] - other.x[None, :]) / self.x[:, None]).min(axis=1) <= tolerance - self_overlap_pts = np.flatnonzero(self_overlap) - if len(self_overlap_pts) == len(self.x[self_overlap_pts.min():self_overlap_pts.max()+1]): - x_interp = self.x[self_overlap] - interp_mask = self_overlap # mask for self.x to select overlap region between datasets - match_pts = np.abs(x_interp[:, None] - other.x[None, :]).argmin(axis=1) - y_interp = np.copy(other.y)[match_pts] - other_dy = np.zeros(y_interp.size) if other.dy is None else np.copy(other.dy)[match_pts] - other_dx = None if other.dx is None else np.copy(other.dx)[match_pts] - other_dxl = None if other.dxl is None else np.copy(other.dxl)[match_pts] - other_dxw = None 
if other.dxw is None else np.copy(other.dxw)[match_pts] - # we need to interpolate the data + # check if data points match (within tolerance) across the overlap range of self.x and other.x + self_overlap_bool = np.abs((self.x[:, None] - other.x[None, :]) / self.x[:, None]).min(axis=1) <= tolerance + self_overlap_index = np.flatnonzero(self_overlap_bool) + if len(self_overlap_index) == self_overlap_index.max() - self_overlap_index.min() + 1: + # all the points in overlap region of self.x found close matches in overlap region of other.x + x_op = self.x[self_overlap_bool] + other._operation = other.clone_without_data(x_op.size) + other_overlap_index = (np.abs(x_op[:, None] - other.x[None, :])).argmin(axis=1) + y_op_other = np.copy(other.y)[other_overlap_index] + dy_op_other = np.zeros(y_op_other.size) if other.dy is None else np.copy(other.dy)[other_overlap_index] + dx_op_other = None if other.dx is None else np.copy(other.dx)[other_overlap_index] + dxl_op_other = None if other.dxl is None else np.copy(other.dxl)[other_overlap_index] + dxw_op_other = None if other.dxw is None else np.copy(other.dxw)[other_overlap_index] + lam_op_other = None if other.lam is None else np.copy(other.lam)[other_overlap_index] + dlam_op_other = None if other.dlam is None else np.copy(other.dlam)[other_overlap_index] else: - self_overlap = np.zeros(self.x.size, dtype=bool) - self_overlap[self_overlap_pts.min():self_overlap_pts.max()+1] = True - x_interp = np.copy(self.x)[self_overlap_pts.min():self_overlap_pts.max()+1] - interp_mask = self_overlap - # linear interpolation on a log scale - y_interp = np.power(10, np.interp(np.log10(x_interp), np.log10(other.x), np.log10(other.y))) - other_dy = np.zeros(y_interp.size) - # unsure if the following is correct, but setting resolutions to None if data is interpolated - other_dx = None - other_dxl = None - other_dxw = None - - other._x_op = x_interp - other._y_op = y_interp - other._dy_op = other_dy - other._dx_op = other_dx - other._dxl_op = 
other_dxl - other._dxw_op = other_dxw - # make sure other parameters are cleared from previous operations - other._lam_op = None - other._dlam_op = None + # not all the points found a close match so implementing interpolation on log scale + self_overlap_bool = (self.x >= max([self.x.min(), other.x.min()])) & (self.x <= min([self.x.max(), other.x.max()])) + self_overlap_index = np.flatnonzero(self_overlap_bool) + x_op = self.x[self_overlap_bool] + other._operation = other.clone_without_data(x_op.size) + if scale == 'log': + y_op_other, dy_op_other = \ + interpolations.linear_scales(x_interp=x_op, x=other.x, y=other.y, dy=other.dy, scale='log') + else: + y_op_other, dy_op_other = interpolations.linear(x_interp=x_op, x=other.x, y=other.y, dy=other.dy) + + # setting resolutions and wavelength parameters to None if data is interpolated + # TODO: determine proper propagation of resolution through interpolation + dx_op_other = None + dxl_op_other = None + dxw_op_other = None + lam_op_other = None + dlam_op_other = None + + other._operation.x = x_op + other._operation.y = y_op_other + other._operation.dy = dy_op_other if dy_op_other is not None else np.zeros(x_op.size) + other._operation.dx = dx_op_other + other._operation.dxl = dxl_op_other + other._operation.dxw = dxw_op_other + other._operation.lam = lam_op_other + other._operation.dlam = dlam_op_other + else: # other is something besides Data1D and so all points in self should be used for operation - # don't mess with the other parameters since it's not Data1D - interp_mask = np.ones(self.x.size, dtype=bool) - - # update operation parameters of self - self._x_op = self.x[interp_mask] - self._y_op = self.y[interp_mask] - self._dy_op = self.dy[interp_mask] if self.dy is not None else np.zeros(self._y_op.size, dtype=float) - self._dx_op = self.dx[interp_mask] if self.dx is not None else None - self._dxl_op = self.dxl[interp_mask] if self.dxl is not None else None - self._dxw_op = self.dxw[interp_mask] if self.dxw is not 
None else None - self._lam_op = self.lam[interp_mask] if self.lam is not None else None - self._dlam_op = self.dlam[interp_mask] if self.dlam is not None else None + self_overlap_bool = np.ones(self.x.size, dtype=bool) + self_overlap_index = np.arange(0, self.x.size+1, 1) + + # setup _operation Data1D for self + self._operation = self.clone_without_data(self_overlap_index.size) + self._operation.x = self.x[self_overlap_bool] + self._operation.y = self.y[self_overlap_bool] + self._operation.dy = self.dy[self_overlap_bool] if self.dy is not None \ + else np.zeros(self._operation.y.size, dtype=float) + self._operation.dx = self.dx[self_overlap_bool] if self.dx is not None else None + self._operation.dxl = self.dxl[self_overlap_bool] if self.dxl is not None else None + self._operation.dxw = self.dxw[self_overlap_bool] if self.dxw is not None else None + self._operation.lam = self.lam[self_overlap_bool] if self.lam is not None else None + self._operation.dlam = self.dlam[self_overlap_bool] if self.dlam is not None else None def _perform_operation(self, other, operation): """ @@ -915,27 +917,27 @@ def _perform_operation(self, other, operation): # interpolation will be implemented on the 'other' dataset as needed self._interpolation_operation(other) - result = self.clone_without_data(self._x_op.size) - result.x = np.copy(self._x_op) - # result.y is initialized as arrays of zero with length of _x_op - # result.dy is initialized as arrays of zero with length of _x_op - result.dx = None if self._dx_op is None else np.copy(self._dx_op) - result.dxl = None if self._dxl_op is None else np.copy(self._dxl_op) - result.dxw = None if self._dxw_op is None else np.copy(self._dxw_op) - result.lam = None if self._lam_op is None else np.copy(self._lam_op) - result.dlam = None if self._dlam_op is None else np.copy(self._dlam_op) + result = self.clone_without_data(self._operation.x.size) + result.x = np.copy(self._operation.x) + # result.y is initialized as arrays of zero with length of 
_operation.x + # result.dy is initialized as arrays of zero with length of _operation.x + result.dx = None if self._operation.dx is None else np.copy(self._operation.dx) + result.dxl = None if self._operation.dxl is None else np.copy(self._operation.dxl) + result.dxw = None if self._operation.dxw is None else np.copy(self._operation.dxw) + result.lam = None if self._operation.lam is None else np.copy(self._operation.lam) + result.dlam = None if self._operation.dlam is None else np.copy(self._operation.dlam) for i in range(result.x.size): - a = Uncertainty(self._y_op[i], self._dy_op[i]**2) + a = Uncertainty(self._operation.y[i], self._operation.dy[i]**2) if isinstance(other, Data1D): - b = Uncertainty(other._y_op[i], other._dy_op[i]**2) - if result.dx is not None and other._dx_op is not None: - result.dx[i] = math.sqrt((self._dx_op[i]**2 + other._dx_op[i]**2) / 2) - if result.dxl is not None and other._dxl_op is not None: - result.dxl[i] = math.sqrt((self._dxl_op[i]**2 + other._dxl_op[i]**2) / 2) - if result.dxw is not None and other._dxw_op is not None: - result.dxw[i] = math.sqrt((self._dxw_op[i]**2 + other._dxw_op[i]**2) / 2) + b = Uncertainty(other._operation.y[i], other._operation.dy[i]**2) + if result.dx is not None and other._operation.dx is not None: + result.dx[i] = math.sqrt((self._operation.dx[i]**2 + other._operation.dx[i]**2) / 2) + if result.dxl is not None and other._operation.dxl is not None: + result.dxl[i] = math.sqrt((self._operation.dxl[i]**2 + other._operation.dxl[i]**2) / 2) + if result.dxw is not None and other._operation.dxw is not None: + result.dxw[i] = math.sqrt((self._operation.dxw[i]**2 + other._operation.dxw[i]**2) / 2) else: b = other diff --git a/test/sasdataloader/utest_data_info.py b/test/sasdataloader/utest_data_info.py index 7d312ad..582bb2e 100644 --- a/test/sasdataloader/utest_data_info.py +++ b/test/sasdataloader/utest_data_info.py @@ -21,81 +21,83 @@ def test_interpolation_operation(self): """ # test x2 range within x1 range + 
# INTERPOLATION OPERATION TEST 1 data1 = Data1D(x=[1, 2, 3, 4], y=[2, 3, 4, 5]) data2 = Data1D(x=[2, 3], y=[0.2, 0.5]) data1._interpolation_operation(data2) - assert_allclose(np.array([2., 3.]), data1._x_op, RTOL) - assert_allclose(np.array([2., 3.]), data2._x_op, RTOL) - assert_allclose(np.array([3., 4.]), data1._y_op, RTOL) - assert_allclose(np.array([0.2, 0.5]), data2._y_op, RTOL) + assert_allclose(np.array([2., 3.]), data1._operation.x, RTOL) + assert_allclose(np.array([2., 3.]), data2._operation.x, RTOL) + assert_allclose(np.array([3., 4.]), data1._operation.y, RTOL) + assert_allclose(np.array([0.2, 0.5]), data2._operation.y, RTOL) # test x1 range within x2 range data1 = Data1D(x=[2, 3], y=[0.2, 0.5]) data2 = Data1D(x=[1, 2, 3, 4], y=[2, 3, 4, 5]) data1._interpolation_operation(data2) - assert_allclose(np.array([2., 3.]), data1._x_op, RTOL) - assert_allclose(np.array([2., 3.]), data2._x_op, RTOL) - assert_allclose(np.array([0.2, 0.5]), data1._y_op, RTOL) - assert_allclose(np.array([3., 4.]), data2._y_op, RTOL) + assert_allclose(np.array([2., 3.]), data1._operation.x, RTOL) + assert_allclose(np.array([2., 3.]), data2._operation.x, RTOL) + assert_allclose(np.array([0.2, 0.5]), data1._operation.y, RTOL) + assert_allclose(np.array([3., 4.]), data2._operation.y, RTOL) # test overlap of x2 at high x1 data1 = Data1D(x=[1, 2, 3, 4], y=[2, 3, 4, 5]) data2 = Data1D(x=[3, 4, 5], y=[0.2, 0.5, 0.7]) data1._interpolation_operation(data2) - assert_allclose(np.array([3., 4.]), data1._x_op, RTOL) - assert_allclose(np.array([3., 4.]), data2._x_op, RTOL) - assert_allclose(np.array([4., 5.]), data1._y_op, RTOL) - assert_allclose(np.array([0.2, 0.5]), data2._y_op, RTOL) + assert_allclose(np.array([3., 4.]), data1._operation.x, RTOL) + assert_allclose(np.array([3., 4.]), data2._operation.x, RTOL) + assert_allclose(np.array([4., 5.]), data1._operation.y, RTOL) + assert_allclose(np.array([0.2, 0.5]), data2._operation.y, RTOL) # test overlap of x2 at low x1 data1 = Data1D(x=[1, 2, 3, 
4], y=[2, 3, 4, 5]) data2 = Data1D(x=[0.2, 1, 2], y=[0.2, 0.5, 0.7]) data1._interpolation_operation(data2) - assert_allclose(np.array([1., 2.]), data1._x_op, RTOL) - assert_allclose(np.array([1., 2.]), data2._x_op, RTOL) - assert_allclose(np.array([2., 3.]), data1._y_op, RTOL) - assert_allclose(np.array([0.5, 0.7]), data2._y_op, RTOL) + assert_allclose(np.array([1., 2.]), data1._operation.x, RTOL) + assert_allclose(np.array([1., 2.]), data2._operation.x, RTOL) + assert_allclose(np.array([2., 3.]), data1._operation.y, RTOL) + assert_allclose(np.array([0.5, 0.7]), data2._operation.y, RTOL) # test equal x1 and x 2 data1 = Data1D(x=[1, 2, 3, 4], y=[2, 3, 4, 5]) data2 = Data1D(x=[1, 2, 3, 4], y=[0.2, 0.3, 0.4, 0.5]) data1._interpolation_operation(data2) - assert_allclose(np.array([1., 2., 3., 4.]), data1._x_op, RTOL) - assert_allclose(np.array([1., 2., 3., 4.]), data2._x_op, RTOL) - assert_allclose(np.array([2., 3., 4., 5.]), data1._y_op, RTOL) - assert_allclose(np.array([0.2, 0.3, 0.4, 0.5]), data2._y_op, RTOL) + assert_allclose(np.array([1., 2., 3., 4.]), data1._operation.x, RTOL) + assert_allclose(np.array([1., 2., 3., 4.]), data2._operation.x, RTOL) + assert_allclose(np.array([2., 3., 4., 5.]), data1._operation.y, RTOL) + assert_allclose(np.array([0.2, 0.3, 0.4, 0.5]), data2._operation.y, RTOL) # check once that these are all 0 or None if not supplied in original datasets - assert_equal(data1._dy_op, 0) - self.assertIsNone(data1._dx_op) - self.assertIsNone(data1._dxl_op) - self.assertIsNone(data1._dxw_op) - self.assertIsNone(data1._lam_op) - self.assertIsNone(data1._dlam_op) - assert_equal(data2._dy_op, 0) - self.assertIsNone(data2._dx_op) - self.assertIsNone(data2._dxl_op) - self.assertIsNone(data2._dxw_op) - self.assertIsNone(data2._lam_op) - self.assertIsNone(data2._dlam_op) + assert_equal(data1._operation.dy, 0) + assert_equal(data2._operation.dy, 0) + self.assertIsNone(data1._operation.dx) + self.assertIsNone(data1._operation.dxl) + 
self.assertIsNone(data1._operation.dxw) + self.assertIsNone(data1._operation.lam) + self.assertIsNone(data1._operation.dlam) + + self.assertIsNone(data2._operation.dx) + self.assertIsNone(data2._operation.dxl) + self.assertIsNone(data2._operation.dxw) + self.assertIsNone(data2._operation.lam) + self.assertIsNone(data2._operation.dlam) # test tolerance data1 = Data1D(x=[1, 2, 3, 4, 5], y=[2, 3, 4, 5, 6]) data2 = Data1D(x=[1, 2.19999, 3, 4.2, 5.6, 6], y=[0.2, 0.3, 0.4, 0.5, 0.6, 0.7]) data1._interpolation_operation(data2, tolerance=0.1) - assert_allclose(np.array([1., 2., 3., 4.]), data1._x_op, RTOL) - assert_allclose(np.array([1., 2., 3., 4.]), data2._x_op, RTOL) - assert_allclose(np.array([2, 3, 4., 5.]), data1._y_op, RTOL) - assert_allclose(np.array([0.2, 0.3, 0.4, 0.5]), data2._y_op, RTOL) + assert_allclose(np.array([1., 2., 3., 4.]), data1._operation.x, RTOL) + assert_allclose(np.array([1., 2., 3., 4.]), data2._operation.x, RTOL) + assert_allclose(np.array([2, 3, 4., 5.]), data1._operation.y, RTOL) + assert_allclose(np.array([0.2, 0.3, 0.4, 0.5]), data2._operation.y, RTOL) # test interpolation data1 = Data1D(x=[1, 2, 3, 4, 5], y=[2, 3, 4, 5, 6]) data2 = Data1D(x=[2, 2.5, 3.5, 5], y=[0.4, 0.5, 0.6, 0.7]) data1._interpolation_operation(data2) - assert_allclose(np.array([2., 3., 4., 5.]), data1._x_op, RTOL) - assert_allclose(np.array([2., 3., 4., 5.]), data2._x_op, RTOL) - assert_allclose(np.array([3., 4., 5., 6.]), data1._y_op, RTOL) - assert_allclose(np.array([0.4, 0.5519189701334538, 0.6356450684803129, 0.7]), data2._y_op, RTOL) + assert_allclose(np.array([2., 3., 4., 5.]), data1._operation.x, RTOL) + assert_allclose(np.array([2., 3., 4., 5.]), data2._operation.x, RTOL) + assert_allclose(np.array([3., 4., 5., 6.]), data1._operation.y, RTOL) + assert_allclose(np.array([0.4, 0.5519189701334538, 0.6356450684803129, 0.7]), data2._operation.y, RTOL) # check these are copied over appropriately with no interpolation # test overlap of x2 at low x1 @@ -117,19 +119,19 @@ 
def test_interpolation_operation(self): data2.dxw = np.array([0.7, 0.6, 0.5]) data1._interpolation_operation(data2) - assert_allclose(np.array([0.02, 0.03]), data1._dy_op, RTOL) - assert_allclose(np.array([0.01, 0.02]), data1._dx_op, RTOL) - assert_allclose(np.array([10, 11]), data1._lam_op, RTOL) - assert_allclose(np.array([0.1, 0.11]), data1._dlam_op, RTOL) - assert_allclose(np.array([0.1, 0.2]), data1._dxl_op, RTOL) - assert_allclose(np.array([0.4, 0.3]), data1._dxw_op, RTOL) + assert_allclose(np.array([0.02, 0.03]), data1._operation.dy, RTOL) + assert_allclose(np.array([0.01, 0.02]), data1._operation.dx, RTOL) + assert_allclose(np.array([10, 11]), data1._operation.lam, RTOL) + assert_allclose(np.array([0.1, 0.11]), data1._operation.dlam, RTOL) + assert_allclose(np.array([0.1, 0.2]), data1._operation.dxl, RTOL) + assert_allclose(np.array([0.4, 0.3]), data1._operation.dxw, RTOL) - assert_allclose(np.array([0.005, 0.007]), data2._dy_op, RTOL) - assert_allclose(np.array([0.01, 0.02]), data2._dx_op, RTOL) - self.assertIsNone(data2._lam_op) # does not get translated to the resulting dataset from operations - self.assertIsNone(data2._dlam_op) # does not get transferred to the resulting dataset from operations - assert_allclose(np.array([0.6, 0.7]), data2._dxl_op, RTOL) - assert_allclose(np.array([0.6, 0.5]), data2._dxw_op, RTOL) + assert_allclose(np.array([0.005, 0.007]), data2._operation.dy, RTOL) + assert_allclose(np.array([0.01, 0.02]), data2._operation.dx, RTOL) + assert_allclose(np.array([12, 11.]), data2._operation.lam, RTOL) + assert_allclose(np.array([0.12, 0.11]), data2._operation.dlam, RTOL) + assert_allclose(np.array([0.6, 0.7]), data2._operation.dxl, RTOL) + assert_allclose(np.array([0.6, 0.5]), data2._operation.dxw, RTOL) # check these are copied over appropriately with interpolation # test overlap of x2 at low x1 @@ -151,19 +153,19 @@ def test_interpolation_operation(self): data2.dxw = np.array([0.7, 0.6, 0.5]) data1._interpolation_operation(data2) - 
assert_allclose(np.array([0.02, 0.03, 0.04]), data1._dy_op, RTOL) - assert_allclose(np.array([0.01, 0.02, 0.03]), data1._dx_op, RTOL) - assert_allclose(np.array([10, 11, 12]), data1._lam_op, RTOL) - assert_allclose(np.array([0.1, 0.11, 0.12]), data1._dlam_op, RTOL) - assert_allclose(np.array([0.1, 0.2, 0.3]), data1._dxl_op, RTOL) - assert_allclose(np.array([0.4, 0.3, 0.2]), data1._dxw_op, RTOL) + assert_allclose(np.array([0.02, 0.03, 0.04]), data1._operation.dy, RTOL) + assert_allclose(np.array([0.01, 0.02, 0.03]), data1._operation.dx, RTOL) + assert_allclose(np.array([10, 11, 12]), data1._operation.lam, RTOL) + assert_allclose(np.array([0.1, 0.11, 0.12]), data1._operation.dlam, RTOL) + assert_allclose(np.array([0.1, 0.2, 0.3]), data1._operation.dxl, RTOL) + assert_allclose(np.array([0.4, 0.3, 0.2]), data1._operation.dxw, RTOL) - assert_equal(0, data2._dy_op) - self.assertIsNone(data2._dx_op) - self.assertIsNone(data2._lam_op) - self.assertIsNone(data2._dlam_op) - self.assertIsNone(data2._dxl_op) - self.assertIsNone(data2._dxw_op) + assert_allclose(np.array([0.005, 0.0043663206993972085, 0.007]), data2._operation.dy) + self.assertIsNone(data2._operation.dx) + self.assertIsNone(data2._operation.lam) + self.assertIsNone(data2._operation.dlam) + self.assertIsNone(data2._operation.dxl) + self.assertIsNone(data2._operation.dxw) def test_perform_operation(self): """ @@ -197,11 +199,11 @@ def operation(a, b): u2 = Uncertainty(np.array([0.5, 0.7]), np.array([0.005**2, 0.007**2])) u3 = u1-u2 assert_allclose(np.sqrt(np.abs(u3.variance)), result.dy, RTOL) - assert_equal(result.lam, data1._lam_op) - assert_equal(result.dlam, data1._dlam_op) - assert_allclose(np.sqrt(np.abs((data1._dx_op**2+data2._dx_op**2)/2)), result.dx, RTOL) - assert_allclose(np.sqrt(np.abs((data1._dxl_op**2+data2._dxl_op**2)/2)), result.dxl, RTOL) - assert_allclose(np.sqrt(np.abs((data1._dxw_op**2+data2._dxw_op**2)/2)), result.dxw, RTOL) + assert_equal(result.lam, data1._operation.lam) + 
assert_equal(result.dlam, data1._operation.dlam) + assert_allclose(np.sqrt(np.abs((data1._operation.dx**2+data2._operation.dx**2)/2)), result.dx, RTOL) + assert_allclose(np.sqrt(np.abs((data1._operation.dxl**2+data2._operation.dxl**2)/2)), result.dxl, RTOL) + assert_allclose(np.sqrt(np.abs((data1._operation.dxw**2+data2._operation.dxw**2)/2)), result.dxw, RTOL) if __name__ == '__main__': diff --git a/test/sasmanipulations/utest_interpolations.py b/test/sasmanipulations/utest_interpolations.py new file mode 100644 index 0000000..ccba8d9 --- /dev/null +++ b/test/sasmanipulations/utest_interpolations.py @@ -0,0 +1,85 @@ +import unittest + +import numpy as np +from numpy.testing import assert_allclose + +from sasdata.data_util import interpolations + +RTOL = 1e-12 + +class Data1DTests(unittest.TestCase): + """ + This testing class for plottable_1D is incomplete. + Creating class to test _perform_operation and _interpolation_operation only. CMW 1-21-2024 + """ + + def test_linear(self): + """ + Test whether interpolation is performed correctly. + """ + # check interpolation + x = [1, 2, 3, 4, 5] + y = [1, 4, 5, 6, 8] + x_interp = [1.2, 3.5, 4.5] + y_interp = [1.6, 5.5, 7.] 
+ result = interpolations.linear(x_interp, x, y) + assert_allclose(result[0], y_interp, RTOL) + self.assertIsNone(result[1]) + + # check sorting + x = [1, 3, 2, 4, 5] + y = [1, 5, 4, 7, 8] + x_interp = [1.2, 3.5, 4.5] + y_interp = [1.6, 6, 7.5] + result = interpolations.linear(x_interp, x, y) + assert_allclose(result[0], y_interp, RTOL) + + # check error propagation + x = np.array([1, 2, 3, 4]) + y = np.array([1, 4, 5, 6]) + dy = np.array([0.1, 0.2, 0.3, 0.4]) + x_interp = np.array([1.2, 3.5]) + y_interp = np.array([1.6, 5.5]) + i2 = np.searchsorted(x, x_interp) + i1 = i2-1 + dy_interp = np.sqrt(dy[i1]**2*((x_interp-x[i2])/(x[i1]-x[i2]))**2+dy[i2]**2*((x_interp-x[i1])/(x[i2]-x[i1]))**2) + result = interpolations.linear(x_interp, x, y, dy=dy) + assert_allclose(result[0], y_interp, RTOL) + assert_allclose(result[1], dy_interp, RTOL) + + def test_linear_scales(self): + """ + Test whether interpolation is performed correctly with different scales. + """ + # check linear + x = [1., 2, 3, 4, 5] + y = [1., 4, 5, 6, 8] + x_interp = [1.2, 3.5, 4.5] + y_interp = [1.6, 5.5, 7.] 
+ result = interpolations.linear_scales(x_interp, x, y, scale='linear') + assert_allclose(result[0], y_interp, RTOL) + self.assertIsNone(result[1]) + + # check log + x = np.array([1, 2, 3, 4, 5]) + y = np.array([1, 4, 5, 6, 8]) + dy = np.array([0.1, 0.2, 0.3, 0.4, 0.5]) + x_interp = [1.2, 3.5, 4.5] + + result = interpolations.linear_scales(x_interp, x, y, dy=dy, scale='log') + assert_allclose(result[0], np.array([1.44, 5.5131300913615755, 6.983904974860978]), RTOL) + assert_allclose(result[1], np.array([ + 0.10779966010303317, + 0.24972135075650462, + 0.31845097763629354, + ])) + + # check log + x = np.array([1, 2, 3, 4, 5]) + y = np.array([1, 4, 5, 6, 8]) + x_interp = [1.2, 3.5, 4.5] + + result = interpolations.linear_scales(x_interp, x, y, dy=None, scale='log') + assert_allclose(result[0], np.array([1.44, 5.5131300913615755, 6.983904974860978]), RTOL) + self.assertIsNone(result[1]) + From 142414f160f506e964e3772a86481f0f2415be25 Mon Sep 17 00:00:00 2001 From: Caitlyn Wolf Date: Mon, 22 Jan 2024 16:25:36 -0500 Subject: [PATCH 06/10] updating plot --- sasdata/dataloader/data_info.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/sasdata/dataloader/data_info.py b/sasdata/dataloader/data_info.py index 344b49f..e24e626 100644 --- a/sasdata/dataloader/data_info.py +++ b/sasdata/dataloader/data_info.py @@ -856,6 +856,7 @@ def _interpolation_operation(self, other, tolerance: Optional[float] = 0.01, sca # all the points in overlap region of self.x found close matches in overlap region of other.x x_op = self.x[self_overlap_bool] other._operation = other.clone_without_data(x_op.size) + other._operation.copy_from_datainfo(other) other_overlap_index = (np.abs(x_op[:, None] - other.x[None, :])).argmin(axis=1) y_op_other = np.copy(other.y)[other_overlap_index] dy_op_other = np.zeros(y_op_other.size) if other.dy is None else np.copy(other.dy)[other_overlap_index] @@ -870,6 +871,7 @@ def _interpolation_operation(self, other, tolerance: Optional[float] = 
0.01, sca self_overlap_index = np.flatnonzero(self_overlap_bool) x_op = self.x[self_overlap_bool] other._operation = other.clone_without_data(x_op.size) + other._operation.copy_from_datainfo(other) if scale == 'log': y_op_other, dy_op_other = \ interpolations.linear_scales(x_interp=x_op, x=other.x, y=other.y, dy=other.dy, scale='log') @@ -900,6 +902,7 @@ def _interpolation_operation(self, other, tolerance: Optional[float] = 0.01, sca # setup _operation Data1D for self self._operation = self.clone_without_data(self_overlap_index.size) + self._operation.copy_from_datainfo(self) self._operation.x = self.x[self_overlap_bool] self._operation.y = self.y[self_overlap_bool] self._operation.dy = self.dy[self_overlap_bool] if self.dy is not None \ @@ -918,9 +921,10 @@ def _perform_operation(self, other, operation): self._interpolation_operation(other) result = self.clone_without_data(self._operation.x.size) + result.copy_from_datainfo(self) result.x = np.copy(self._operation.x) - # result.y is initialized as arrays of zero with length of _operation.x - # result.dy is initialized as arrays of zero with length of _operation.x + result.y = np.zeros(self._operation.x.size) + result.dy = np.zeros(self._operation.x.size) result.dx = None if self._operation.dx is None else np.copy(self._operation.dx) result.dxl = None if self._operation.dxl is None else np.copy(self._operation.dxl) result.dxw = None if self._operation.dxw is None else np.copy(self._operation.dxw) From 5db8896b86a68704bf4792f7e84133b7b3be689e Mon Sep 17 00:00:00 2001 From: Caitlyn Wolf Date: Wed, 31 Jan 2024 12:06:09 -0500 Subject: [PATCH 07/10] added functionality for sesans operations --- sasdata/dataloader/data_info.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/sasdata/dataloader/data_info.py b/sasdata/dataloader/data_info.py index e24e626..59a609d 100644 --- a/sasdata/dataloader/data_info.py +++ b/sasdata/dataloader/data_info.py @@ -796,7 +796,7 @@ def clone_without_data(self, 
length=0, clone=None): dy = np.zeros(length) lam = np.zeros(length) dlam = np.zeros(length) - clone = Data1D(x, y, lam=lam, dx=dx, dy=dy, dlam=dlam) + clone = Data1D(x, y, lam=lam, dx=dx, dy=dy, dlam=dlam, isSesans=self.isSesans) clone.title = self.title clone.run = self.run @@ -879,7 +879,6 @@ def _interpolation_operation(self, other, tolerance: Optional[float] = 0.01, sca y_op_other, dy_op_other = interpolations.linear(x_interp=x_op, x=other.x, y=other.y, dy=other.dy) # setting resolutions and wavelength parameters to None if data is interpolated - # TODO: determine proper propagation of resolution through interpolation dx_op_other = None dxl_op_other = None dxw_op_other = None @@ -917,8 +916,12 @@ def _perform_operation(self, other, operation): """ """ # Check for compatibility of the x-ranges and populate the data used for the operation + # sets up _operation for both datasets # interpolation will be implemented on the 'other' dataset as needed - self._interpolation_operation(other) + if self.isSesans: + self._interpolation_operation(other, scale='linear') + else: + self._interpolation_operation(other, scale='log') result = self.clone_without_data(self._operation.x.size) result.copy_from_datainfo(self) From 2ec4bdb038d021fdefb2dc051d7d0b2a53755289 Mon Sep 17 00:00:00 2001 From: Caitlyn Wolf Date: Wed, 31 Jan 2024 12:21:17 -0500 Subject: [PATCH 08/10] added logging message that indicates the operation was completed --- sasdata/dataloader/data_info.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/sasdata/dataloader/data_info.py b/sasdata/dataloader/data_info.py index 59a609d..40bfba5 100644 --- a/sasdata/dataloader/data_info.py +++ b/sasdata/dataloader/data_info.py @@ -19,6 +19,7 @@ # TODO: This module should be independent of plottables. We should write # an adapter class for plottables when needed. 
+import logging import math from math import fabs import copy @@ -29,6 +30,8 @@ from sasdata.data_util.uncertainty import Uncertainty from sasdata.data_util import interpolations +logger = logging.getLogger(__name__) + class plottable_1D(object): """ @@ -867,6 +870,7 @@ def _interpolation_operation(self, other, tolerance: Optional[float] = 0.01, sca dlam_op_other = None if other.dlam is None else np.copy(other.dlam)[other_overlap_index] else: # not all the points found a close match so implementing interpolation on log scale + logging.info(f"Operation requires interpolation of Data2.") self_overlap_bool = (self.x >= max([self.x.min(), other.x.min()])) & (self.x <= min([self.x.max(), other.x.max()])) self_overlap_index = np.flatnonzero(self_overlap_bool) x_op = self.x[self_overlap_bool] From 70ca1a8a5f4a5629d907d79c75fd8b1561a5d16d Mon Sep 17 00:00:00 2001 From: Caitlyn Wolf Date: Tue, 6 Feb 2024 16:40:47 -0500 Subject: [PATCH 09/10] updated handling of uncertainties and resolution during operations --- sasdata/data_util/interpolations.py | 4 +-- sasdata/dataloader/data_info.py | 45 +++++++++++---------------- test/sasdataloader/utest_data_info.py | 44 +++++++++++++------------- 3 files changed, 42 insertions(+), 51 deletions(-) diff --git a/sasdata/data_util/interpolations.py b/sasdata/data_util/interpolations.py index 5a35d1e..9842305 100644 --- a/sasdata/data_util/interpolations.py +++ b/sasdata/data_util/interpolations.py @@ -20,7 +20,7 @@ def linear(x_interp: ArrayLike, x: ArrayLike, y: ArrayLike, dy: Optional[ArrayLi sort = np.argsort(x) x = np.array(x)[sort] y = np.array(y)[sort] - dy = np.array(dy)[sort] if dy is not None else None + dy = np.array(dy)[sort] if (dy is not None and len(dy) == len(y)) else None # find out where the interpolated points fit into the existing data index_2 = np.searchsorted(x, x_interp) @@ -65,7 +65,7 @@ def linear_scales(x_interp: ArrayLike, return result elif scale == "log": - dy = np.array(dy) / y if dy is not None else None + 
dy = np.array(dy) / y if (dy is not None and len(dy) == len(x)) else None x_interp = np.log(x_interp) x = np.log(x) y = np.log(y) diff --git a/sasdata/dataloader/data_info.py b/sasdata/dataloader/data_info.py index 40bfba5..de3aadd 100644 --- a/sasdata/dataloader/data_info.py +++ b/sasdata/dataloader/data_info.py @@ -839,8 +839,9 @@ def copy_from_datainfo(self, data1d): def _interpolation_operation(self, other, tolerance: Optional[float] = 0.01, scale: Optional[str] = 'log'): """ Checks that x values for two datasets have overlapping ranges for an operation. - If so, _x_op, _y_op, _dx_op, _dy_op, _dxl_op, _dxw_op, _lam, _dlam for both self and other are updated to - values that will be used for the operation. + If so, x, y, and dy will be updated to values used in the operation. + Resolutions, including dx, dxl, and dxw, are not kept through in the operation. + Wavelength parameters for SESANS datasets are also not kept through the operation. :param other: other data for operation :param tolerance: acceptable deviation in matching x data points, default 0.01 (equivalent to 1 % deviation) @@ -862,12 +863,8 @@ def _interpolation_operation(self, other, tolerance: Optional[float] = 0.01, sca other._operation.copy_from_datainfo(other) other_overlap_index = (np.abs(x_op[:, None] - other.x[None, :])).argmin(axis=1) y_op_other = np.copy(other.y)[other_overlap_index] - dy_op_other = np.zeros(y_op_other.size) if other.dy is None else np.copy(other.dy)[other_overlap_index] - dx_op_other = None if other.dx is None else np.copy(other.dx)[other_overlap_index] - dxl_op_other = None if other.dxl is None else np.copy(other.dxl)[other_overlap_index] - dxw_op_other = None if other.dxw is None else np.copy(other.dxw)[other_overlap_index] - lam_op_other = None if other.lam is None else np.copy(other.lam)[other_overlap_index] - dlam_op_other = None if other.dlam is None else np.copy(other.dlam)[other_overlap_index] + dy_op_other = np.zeros(x_op.size) if (other.dy is None or 
other.dy.size == 0) \ + else np.copy(other.dy)[other_overlap_index] else: # not all the points found a close match so implementing interpolation on log scale logging.info(f"Operation requires interpolation of Data2.") @@ -882,12 +879,12 @@ def _interpolation_operation(self, other, tolerance: Optional[float] = 0.01, sca else: y_op_other, dy_op_other = interpolations.linear(x_interp=x_op, x=other.x, y=other.y, dy=other.dy) - # setting resolutions and wavelength parameters to None if data is interpolated - dx_op_other = None - dxl_op_other = None - dxw_op_other = None - lam_op_other = None - dlam_op_other = None + # setting resolutions and wavelength parameters to None as these parameters are not carried through + dx_op_other = None + dxl_op_other = None + dxw_op_other = None + lam_op_other = None + dlam_op_other = None other._operation.x = x_op other._operation.y = y_op_other @@ -908,13 +905,13 @@ def _interpolation_operation(self, other, tolerance: Optional[float] = 0.01, sca self._operation.copy_from_datainfo(self) self._operation.x = self.x[self_overlap_bool] self._operation.y = self.y[self_overlap_bool] - self._operation.dy = self.dy[self_overlap_bool] if self.dy is not None \ - else np.zeros(self._operation.y.size, dtype=float) - self._operation.dx = self.dx[self_overlap_bool] if self.dx is not None else None - self._operation.dxl = self.dxl[self_overlap_bool] if self.dxl is not None else None - self._operation.dxw = self.dxw[self_overlap_bool] if self.dxw is not None else None - self._operation.lam = self.lam[self_overlap_bool] if self.lam is not None else None - self._operation.dlam = self.dlam[self_overlap_bool] if self.dlam is not None else None + self._operation.dy = np.zeros(self._operation.y.size, dtype=float) if (self.dy is None or self.dy.size==0) \ + else self.dy[self_overlap_bool] + self._operation.dx = None + self._operation.dxl = None + self._operation.dxw = None + self._operation.lam = None + self._operation.dlam = None def _perform_operation(self, 
other, operation): """ @@ -943,12 +940,6 @@ def _perform_operation(self, other, operation): a = Uncertainty(self._operation.y[i], self._operation.dy[i]**2) if isinstance(other, Data1D): b = Uncertainty(other._operation.y[i], other._operation.dy[i]**2) - if result.dx is not None and other._operation.dx is not None: - result.dx[i] = math.sqrt((self._operation.dx[i]**2 + other._operation.dx[i]**2) / 2) - if result.dxl is not None and other._operation.dxl is not None: - result.dxl[i] = math.sqrt((self._operation.dxl[i]**2 + other._operation.dxl[i]**2) / 2) - if result.dxw is not None and other._operation.dxw is not None: - result.dxw[i] = math.sqrt((self._operation.dxw[i]**2 + other._operation.dxw[i]**2) / 2) else: b = other diff --git a/test/sasdataloader/utest_data_info.py b/test/sasdataloader/utest_data_info.py index 582bb2e..d26f4f0 100644 --- a/test/sasdataloader/utest_data_info.py +++ b/test/sasdataloader/utest_data_info.py @@ -99,7 +99,7 @@ def test_interpolation_operation(self): assert_allclose(np.array([3., 4., 5., 6.]), data1._operation.y, RTOL) assert_allclose(np.array([0.4, 0.5519189701334538, 0.6356450684803129, 0.7]), data2._operation.y, RTOL) - # check these are copied over appropriately with no interpolation + # check handling of resolutions without interpolation # test overlap of x2 at low x1 data1 = Data1D(x=[1, 2, 3, 4], y=[2, 3, 4, 5], @@ -120,20 +120,20 @@ def test_interpolation_operation(self): data1._interpolation_operation(data2) assert_allclose(np.array([0.02, 0.03]), data1._operation.dy, RTOL) - assert_allclose(np.array([0.01, 0.02]), data1._operation.dx, RTOL) - assert_allclose(np.array([10, 11]), data1._operation.lam, RTOL) - assert_allclose(np.array([0.1, 0.11]), data1._operation.dlam, RTOL) - assert_allclose(np.array([0.1, 0.2]), data1._operation.dxl, RTOL) - assert_allclose(np.array([0.4, 0.3]), data1._operation.dxw, RTOL) + self.assertIsNone(data1._operation.dx) + self.assertIsNone(data1._operation.dxl) + 
self.assertIsNone(data1._operation.dxw)
+        self.assertIsNone(data1._operation.lam)
+        self.assertIsNone(data1._operation.dlam)
 
         assert_allclose(np.array([0.005, 0.007]), data2._operation.dy, RTOL)
-        assert_allclose(np.array([0.01, 0.02]), data2._operation.dx, RTOL)
-        assert_allclose(np.array([12, 11.]), data2._operation.lam, RTOL)
-        assert_allclose(np.array([0.12, 0.11]), data2._operation.dlam, RTOL)
-        assert_allclose(np.array([0.6, 0.7]), data2._operation.dxl, RTOL)
-        assert_allclose(np.array([0.6, 0.5]), data2._operation.dxw, RTOL)
+        self.assertIsNone(data2._operation.dx)
+        self.assertIsNone(data2._operation.dxl)
+        self.assertIsNone(data2._operation.dxw)
+        self.assertIsNone(data2._operation.lam)
+        self.assertIsNone(data2._operation.dlam)
 
-        # check these are copied over appropriately with interpolation
+        # check handling of resolutions with interpolation
         # test overlap of x2 at low x1
         data1 = Data1D(x=[1, 1.5, 2, 3],
                        y=[2, 3, 4, 5],
@@ -154,11 +154,11 @@ def test_interpolation_operation(self):
         data2.dxw = np.array([0.7, 0.6, 0.5])
 
         data1._interpolation_operation(data2)
         assert_allclose(np.array([0.02, 0.03, 0.04]), data1._operation.dy, RTOL)
-        assert_allclose(np.array([0.01, 0.02, 0.03]), data1._operation.dx, RTOL)
-        assert_allclose(np.array([10, 11, 12]), data1._operation.lam, RTOL)
-        assert_allclose(np.array([0.1, 0.11, 0.12]), data1._operation.dlam, RTOL)
-        assert_allclose(np.array([0.1, 0.2, 0.3]), data1._operation.dxl, RTOL)
-        assert_allclose(np.array([0.4, 0.3, 0.2]), data1._operation.dxw, RTOL)
+        self.assertIsNone(data1._operation.dx)
+        self.assertIsNone(data1._operation.dxl)
+        self.assertIsNone(data1._operation.dxw)
+        self.assertIsNone(data1._operation.lam)
+        self.assertIsNone(data1._operation.dlam)
 
         assert_allclose(np.array([0.005, 0.0043663206993972085, 0.007]), data2._operation.dy)
         self.assertIsNone(data2._operation.dx)
@@ -199,11 +199,11 @@ def operation(a, b):
         u2 = Uncertainty(np.array([0.5, 0.7]), np.array([0.005**2, 0.007**2]))
         u3 = u1-u2
         assert_allclose(np.sqrt(np.abs(u3.variance)), 
result.dy, RTOL)
-        assert_equal(result.lam, data1._operation.lam)
-        assert_equal(result.dlam, data1._operation.dlam)
-        assert_allclose(np.sqrt(np.abs((data1._operation.dx**2+data2._operation.dx**2)/2)), result.dx, RTOL)
-        assert_allclose(np.sqrt(np.abs((data1._operation.dxl**2+data2._operation.dxl**2)/2)), result.dxl, RTOL)
-        assert_allclose(np.sqrt(np.abs((data1._operation.dxw**2+data2._operation.dxw**2)/2)), result.dxw, RTOL)
+        self.assertIsNone(result.dx)
+        self.assertIsNone(result.dxl)
+        self.assertIsNone(result.dxw)
+        self.assertIsNone(result.lam)
+        self.assertIsNone(result.dlam)
 
 
 if __name__ == '__main__':

From 650f975445aefbb936cd021af73f954bf5bbc8a4 Mon Sep 17 00:00:00 2001
From: Caitlyn Wolf
Date: Tue, 6 Feb 2024 17:27:39 -0500
Subject: [PATCH 10/10] updating logging messages

---
 sasdata/dataloader/data_info.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/sasdata/dataloader/data_info.py b/sasdata/dataloader/data_info.py
index de3aadd..5a8f40c 100644
--- a/sasdata/dataloader/data_info.py
+++ b/sasdata/dataloader/data_info.py
@@ -867,7 +867,6 @@ def _interpolation_operation(self, other, tolerance: Optional[float] = 0.01, sca
             else np.copy(other.dy)[other_overlap_index]
         else:
             # not all the points found a close match so implementing interpolation on log scale
-            logging.info(f"Operation requires interpolation of Data2.")
             self_overlap_bool = (self.x >= max([self.x.min(), other.x.min()])) & (self.x <= min([self.x.max(), other.x.max()]))
             self_overlap_index = np.flatnonzero(self_overlap_bool)
             x_op = self.x[self_overlap_bool]
@@ -942,7 +941,7 @@ def _perform_operation(self, other, operation):
                 b = Uncertainty(other._operation.y[i], other._operation.dy[i]**2)
             else:
                 b = other
             output = operation(a, b)
             result.y[i] = output.x
             result.dy[i] = math.sqrt(math.fabs(output.variance))