Removed unused keywords
- The `store_data` kwarg was removed since it was never used. The logic for
  passing data between decomposition levels was also streamlined, reducing the
  number of copies of the data and of its reconstructions (see the sketch
  after the `fit()` hunk below).
- The `suppress_growth` kwarg was removed. It was a holdover from the original
  MATLAB implementation; testing showed the flag most likely compensated for a
  mistake that defined frequency bands on the complex conjugate rather than on
  the imaginary component (a short sketch of the removed behavior follows).
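
For reference, a minimal standalone sketch of the behavior the removed flag implemented (the eigenvalue values are illustrative; `omega` stands for the complex continuous-time eigenvalues handled in `scale_reconstruction`):

import numpy as np

# Illustrative eigenvalues: real part = growth/decay rate, imaginary part = frequency.
omega = np.array([0.3 + 2.0j, -0.1 + 1.0j, 0.05 - 4.0j])

# The removed `suppress_growth` flag replaced any eigenvalue with a positive
# real part by its purely imaginary component, zeroing out exponential growth
# in the reconstruction.
omega[omega.real > 0] = 1j * omega[omega.real > 0].imag

print(omega)  # growing modes become purely oscillatory: 2j, -0.1+1j, -4j
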
klapo committed Jan 29, 2024
1 parent f8e95d0 commit 93a09a4
Showing 2 changed files with 7 additions and 51 deletions.
pydmd/costs.py: 6 changes (0 additions, 6 deletions)
@@ -827,7 +827,6 @@ def global_reconstruction(self, scale_reconstruction_kwargs=None):
 
     def scale_reconstruction(
         self,
-        suppress_growth=False,
         include_means=True,
     ):
         """Reconstruct the spatiotemporal features for each frequency band.
@@ -838,7 +837,6 @@ def scale_reconstruction(
         and end of time series prone to larger errors. A best practice is
         to cut off `window_length` from each end before further analysis.
-        :param suppress_growth: Not API stable
         :param include_means: Not API stable
         :return: Reconstruction for each frequency band with dimensions of:
             n_components x n_data_vars x n_time_steps
@@ -863,13 +861,9 @@ def scale_reconstruction(
 
             w = self._modes_array[k]
             b = self._amplitudes_array[k]
-            # @ToDo: global flag for suppressing growth?
             omega = copy.deepcopy(np.atleast_2d(self._omega_array[k]).T)
             classification = self._omega_classes[k]
 
-            if suppress_growth:
-                omega[omega.real > 0] = 1j * omega[omega.real > 0].imag
-
             c = np.atleast_2d(self._window_means_array[k]).T
 
             # Compute each segment of the reconstructed data starting at "t = 0"
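
For downstream users, a hedged sketch of the updated call (the object setup is elided; `costs` stands for an already-fitted COSTS instance and the window length value is illustrative):

# `costs` is assumed to be a fitted pydmd.costs.COSTS instance.
# `suppress_growth` is gone; `include_means` is the remaining keyword.
xr_sep = costs.scale_reconstruction(include_means=True)

# Returned shape: n_components x n_data_vars x n_time_steps. Per the
# docstring, trim one window length from each end, where the sliding-window
# reconstruction is least reliable.
window_length = 128  # illustrative value; use the length the object was fit with
xr_trimmed = xr_sep[:, :, window_length:-window_length]
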
pydmd/mrcosts.py: 52 changes (7 additions, 45 deletions)
@@ -44,9 +44,7 @@ def __init__(
         n_components_array=None,
         cluster_sweep=False,
         transform_method=None,
-        store_data=True,
     ):
-        self._store_data = store_data
         self._n_components_array = n_components_array
         self._step_size_array = step_size_array
         self._window_length_array = window_length_array
@@ -91,23 +89,9 @@ def __init__(
         self._pydmd_kwargs["use_proj"] = pydmd_kwargs.get("use_proj", False)
 
         if costs_recon_kwargs is None:
-            self._costs_recon_kwargs = {
-                "suppress_growth": False,
-            }
+            self._costs_recon_kwargs = {}
         else:
             self._costs_recon_kwargs = costs_recon_kwargs
-            self._costs_recon_kwargs[
-                "suppress_growth"
-            ] = costs_recon_kwargs.get("suppress_growth", False)
-
-    @property
-    def store_data(self):
-        """
-        :return: If the low-frequency components were stored (True)
-            or discarded (False).
-        :rtype: bool
-        """
-        return self._store_data
 
     @property
     def costs_array(self):
@@ -297,24 +281,13 @@ def fit(self, data, time, verbose=True):
         self._costs_array = []
         self._n_time_steps, self._n_data_vars = self._data_shape(data)
 
-        if self._store_data:
-            data_iter = np.zeros(
-                (n_decompositions, self._n_data_vars, self._n_time_steps)
-            )
-            data_iter[0, :, :] = data
-        else:
-            data_iter = data
+        x_iter = data
 
         for n_decomp, (window, step, rank) in enumerate(
             zip(window_lengths, step_sizes, svd_ranks)
         ):
             global_svd = self._global_svd_array[n_decomp]
 
-            if self._store_data:
-                x_iter = data_iter[n_decomp, :, :].squeeze()
-            else:
-                x_iter = data_iter.squeeze()
-
             mrd = COSTS(
                 svd_rank=rank,
                 global_svd=global_svd,
@@ -349,17 +322,13 @@ def fit(self, data, time, verbose=True):
             re = mrd.relative_error(global_reconstruction.real, x_iter)
             print("Error in Global Reconstruction = {:.2}".format(re))
 
-            # Scale separation
-            xr_low_frequency, xr_high_frequency = mrd.scale_separation(
-                scale_reconstruction_kwargs=self._costs_recon_kwargs
-            )
-
             # Pass the low frequency component to the next level of decomposition.
             if n_decomp < n_decompositions - 1:
-                if self._store_data:
-                    data_iter[n_decomp + 1, :, :] = xr_low_frequency
-                else:
-                    data_iter = xr_low_frequency
+                # Scale separation
+                xr_low_frequency, xr_high_frequency = mrd.scale_separation(
+                    scale_reconstruction_kwargs=self._costs_recon_kwargs
+                )
+                x_iter = xr_low_frequency
 
             # Save the fitted costs object.
             self._costs_array.append(copy.copy(mrd))
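
The hunk above reduces the bookkeeping to the pattern sketched below: a single working array is carried through the loop, and each level's low-frequency component seeds the next level. This is a self-contained toy in which a moving average stands in for fitting a COSTS object and separating scales; only the data-flow pattern is the point.

import numpy as np

def fit_one_level(x, window):
    """Stand-in for COSTS(...).fit(...) followed by scale separation.
    A moving average plays the role of the low-frequency reconstruction."""
    kernel = np.ones(window) / window
    x_low = np.apply_along_axis(
        lambda row: np.convolve(row, kernel, mode="same"), 1, x
    )
    return x_low, x - x_low  # (low-frequency, high-frequency) components

rng = np.random.default_rng(0)
data = rng.standard_normal((3, 1000))  # n_data_vars x n_time_steps
window_lengths = [200, 50]             # one window length per decomposition level

x_iter = data  # single working array; no per-level copies are stored
for window in window_lengths:
    x_low, x_high = fit_one_level(x_iter, window)
    x_iter = x_low  # the low-frequency component is decomposed at the next level
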
@@ -505,7 +474,6 @@ def from_netcdf(self, file_list):
             global_svd_array=global_svd_array,
             pydmd_kwargs=pydmd_kwargs,
             n_components_array=n_components_array,
-            store_data=False,
         )
 
         # Initialize variables that are defined in fitting.
@@ -923,7 +891,6 @@ def transform_omega(omega_array, transform_method=None):
 
     def global_scale_reconstruction(
         self,
-        suppress_growth=False,
     ):
         """Reconstruct mrCOSTS into the constituent frequency bands.
@@ -933,8 +900,6 @@ def global_scale_reconstruction(
         and end of time series prone to larger errors. A best practice is
         to cut off `window_length` from each end before further analysis.
-        :param suppress_growth: Kill positive real components of frequencies.
-            Should not be considered a stable api variable.
         :param n_components: Number of frequency bands from the clustering.
         :type n_components: int
         :param omega_classes_list: Resulting cluster identifiers from clustering omega.
@@ -982,9 +947,6 @@ def global_scale_reconstruction(
                 omega = np.atleast_2d(mrd._omega_array[k]).T
                 classification = omega_classes[k]
 
-                if suppress_growth:
-                    omega[omega.real > 0] = 1j * omega[omega.real > 0].imag
-
                 # Compute each segment of xr starting at "t = 0"
                 t = mrd._time_array[k]
                 t_start = mrd._time_array[k, 0]
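
Correspondingly, call sites of the multi-resolution class change as sketched below (a fragment, not a runnable script: the construction of the object and the input arrays are elided, and the class is assumed to be exposed as `mrCOSTS`):

# Keywords removed by this commit:
#   mrCOSTS(..., store_data=True)                          -> drop store_data
#   mrd.global_scale_reconstruction(suppress_growth=True)  -> drop suppress_growth

mrd.fit(data, time)                           # data: n_data_vars x n_time_steps
xr_bands = mrd.global_scale_reconstruction()  # per-band reconstructions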
