Skip to content

Commit

Permalink
Replaced numpy.asfarray with numpy.asarray using the proper dtype.
Browse files Browse the repository at this point in the history
  • Loading branch information
skrylow authored and skrylow committed Aug 29, 2024
1 parent d7bea3f commit 6d86627
Show file tree
Hide file tree
Showing 7 changed files with 14 additions and 14 deletions.
2 changes: 1 addition & 1 deletion KDEpy/BaseKDE.py
Original file line number Diff line number Diff line change
Expand Up @@ -195,7 +195,7 @@ def _process_sequence(sequence_array_like):
"""
# Must convert to float to avoid possible integer overflow
if isinstance(sequence_array_like, Sequence):
out = np.asfarray(sequence_array_like).reshape(-1, 1)
out = np.asarray(sequence_array_like, dtype=float).reshape(-1, 1)
elif isinstance(sequence_array_like, np.ndarray):
if len(sequence_array_like.shape) == 1:
out = sequence_array_like.reshape(-1, 1)
Expand Down
2 changes: 1 addition & 1 deletion KDEpy/NaiveKDE.py
Original file line number Diff line number Diff line change
Expand Up @@ -123,7 +123,7 @@ def evaluate(self, grid_points=None):
# For every data point, compute the kernel and add to the grid
bw = self.bw
if isinstance(bw, numbers.Number):
bw = np.asfarray(np.ones(self.data.shape[0]) * bw)
bw = np.asarray(np.ones(self.data.shape[0]) * bw, dtype=float)

# TODO: Implementation w.r.t grid points for faster evaluation
# See the SciPy evaluation for how this can be done
Expand Down
2 changes: 1 addition & 1 deletion KDEpy/TreeKDE.py
Original file line number Diff line number Diff line change
Expand Up @@ -136,7 +136,7 @@ def evaluate(self, grid_points=None, eps=10e-4):
obs, dims = self.data.shape
bw = self.bw
if isinstance(bw, numbers.Number):
bw = np.asfarray(np.ones(obs) * bw)
bw = np.asarray(np.ones(obs) * bw, dtype=float)
else:
bw = np.asarray_chkfinite(bw, dtype=float)

Expand Down
14 changes: 7 additions & 7 deletions KDEpy/binning.py
Original file line number Diff line number Diff line change
Expand Up @@ -104,16 +104,16 @@ def linbin_cython(data, grid_points, weights=None):
dx = (max_grid - min_grid) / num_intervals
transformed_data = (data - min_grid) / dx

result = np.asfarray(np.zeros(num_intervals + 2))
result = np.asarray(np.zeros(num_intervals + 2), dtype=float)

# Two Cython functions are implemented, one for weighted data and one
# for unweighted data, since creating equal weights is costly w.r.t time
if weights is None:
result = _cutils.iterate_data_1D(transformed_data, result)
return np.asfarray(result[:-1]) / transformed_data.shape[0]
return np.asarray(result[:-1], dtype=float) / transformed_data.shape[0]
else:
res = _cutils.iterate_data_1D_weighted(transformed_data, weights, result)
return np.asfarray(res[:-1]) # Remove last, outside of grid
return np.asarray(res[:-1], dtype=float) # Remove last, outside of grid


def linbin_numpy(data, grid_points, weights=None):
Expand Down Expand Up @@ -197,7 +197,7 @@ def linbin_numpy(data, grid_points, weights=None):
unique_integrals = np.unique(integral)
unique_integrals = unique_integrals[(unique_integrals >= 0) & (unique_integrals <= len(grid_points))]

result = np.asfarray(np.zeros(len(grid_points) + 1))
result = np.asarray(np.zeros(len(grid_points) + 1), dtype=float)
for grid_point in unique_integrals:
# Use binary search to find indices for the grid point
# Then sum the data assigned to that grid point
Expand Down Expand Up @@ -337,7 +337,7 @@ def linbin_Ndim(data, grid_points, weights=None):

# Compute the number of grid points for each dimension in the grid
grid_num = (grid_points[:, i] for i in range(dims))
grid_num = np.array(list(len(np.unique(g)) for g in grid_num))
grid_num = np.array(list(len(np.unique(g)) for g in grid_num), dtype="int32")

# Scale the data to the grid
min_grid = np.min(grid_points, axis=0)
Expand All @@ -356,7 +356,7 @@ def linbin_Ndim(data, grid_points, weights=None):
# Weighted data has two specific routines
if weights is not None:
if data_dims >= 3:
binary_flgs = cartesian(([0, 1],) * dims)
binary_flgs = cartesian(([0, 1],) * dims).astype("int32")
result = _cutils.iterate_data_ND_weighted(data, weights, result, grid_num, obs_tot, binary_flgs)
else:
result = _cutils.iterate_data_2D_weighted(data, weights, result, grid_num, obs_tot)
Expand All @@ -367,7 +367,7 @@ def linbin_Ndim(data, grid_points, weights=None):
# specialize routine for this case.
else:
if data_dims >= 3:
binary_flgs = cartesian(([0, 1],) * dims)
binary_flgs = cartesian(([0, 1],) * dims).astype("int32")
result = _cutils.iterate_data_ND(data, result, grid_num, obs_tot, binary_flgs)
else:
result = _cutils.iterate_data_2D(data, result, grid_num, obs_tot)
Expand Down
4 changes: 2 additions & 2 deletions KDEpy/bw_selection.py
Original file line number Diff line number Diff line change
Expand Up @@ -55,8 +55,8 @@ def _fixed_point(t, N, I_sq, a2):
"""

# This is important, as the powers might overflow if not done
I_sq = np.asfarray(I_sq, dtype=FLOAT)
a2 = np.asfarray(a2, dtype=FLOAT)
I_sq = np.asarray(I_sq, dtype=FLOAT)
a2 = np.asarray(a2, dtype=FLOAT)

# ell = 7 corresponds to the 5 steps recommended in the paper
ell = 7
Expand Down
2 changes: 1 addition & 1 deletion KDEpy/tests/test_api.py
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,7 @@ def test_api_models_kernels_bandwidths(kde1, kde2, bw, kernel):
assert err < 0.002


type_functions = [tuple, np.array, np.asfarray, lambda x: np.asfarray(x).reshape(-1, 1)]
type_functions = [tuple, np.array, np.asarray, lambda x: np.asarray(x, dtype=float).reshape(-1, 1)]


@pytest.mark.parametrize(
Expand Down
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
name = "KDEpy"
version = "1.1.9"
dependencies = [
"numpy>=1.14.2,<2.0",
"numpy>=1.14.2",
"scipy>=1.0.1,<2.0",
]
description = "Kernel Density Estimation in Python."
Expand Down

0 comments on commit 6d86627

Please sign in to comment.