Commit

reformat w ruff
tjlane committed Aug 11, 2024
1 parent 537eb2f commit 2a14c79
Showing 6 changed files with 100 additions and 99 deletions.
71 changes: 34 additions & 37 deletions meteor/dsutils.py
@@ -1,43 +1,40 @@

import numpy as np
import gemmi as gm
import reciprocalspaceship as rs
from scipy.stats import binned_statistic


def res_cutoff(df, h_res, l_res) :
def res_cutoff(df, h_res, l_res):
"""
Apply specified low and high resolution cutoffs to rs.Dataset.
"""
df = df.loc[(df['dHKL'] >= h_res) & (df['dHKL'] <= l_res)]
df = df.loc[(df["dHKL"] >= h_res) & (df["dHKL"] <= l_res)]
return df


def resolution_shells(data, dhkl, n):
"""Average data in n resolution shells"""

""" Average data in n resolution shells """

mean_data = binned_statistic(dhkl, data, statistic='mean', bins=n, range=(np.min(dhkl), np.max(dhkl)))
bin_centers = (mean_data.bin_edges[:-1] + mean_data.bin_edges[1:]) / 2
mean_data = binned_statistic(
dhkl, data, statistic="mean", bins=n, range=(np.min(dhkl), np.max(dhkl))
)
bin_centers = (mean_data.bin_edges[:-1] + mean_data.bin_edges[1:]) / 2

return bin_centers, mean_data.statistic


def adjust_phi_interval(phi):
"""Given a set of phases, return the equivalent in -180 <= phi <= 180 interval"""

""" Given a set of phases, return the equivalent in -180 <= phi <= 180 interval"""
phi = phi % 360
phi[phi > 180] -= 360

phi = phi%360
phi[phi > 180] -=360

assert np.min(phi) >= -181
assert np.max(phi) <= 181

return phi


def positive_Fs(df, phases, Fs, phases_new, Fs_new):

"""
Convert between an MTZ format where difference structure factor amplitudes are saved as both positive and negative, to format where they are only positive.
@@ -51,48 +48,48 @@ def positive_Fs(df, phases, Fs, phases_new, Fs_new):
Returns :
rs.Dataset with new labels
"""

new_phis = df[phases].copy(deep=True)
new_Fs = df[Fs].copy(deep=True)
negs = np.where(df[Fs]<0)
new_Fs = df[Fs].copy(deep=True)

negs = np.where(df[Fs] < 0)

df[phases] = adjust_phi_interval(df[phases])

for i in negs:
new_phis.iloc[i] = df[phases].iloc[i]+180
new_Fs.iloc[i] = np.abs(new_Fs.iloc[i])
new_phis = adjust_phi_interval(new_phis)

df_new = df.copy(deep=True)
df_new[Fs_new] = new_Fs
df_new[Fs_new] = df_new[Fs_new].astype("SFAmplitude")
df_new[phases_new] = new_phis
df_new[phases_new] = df_new[phases_new].astype("Phase")
new_phis.iloc[i] = df[phases].iloc[i] + 180
new_Fs.iloc[i] = np.abs(new_Fs.iloc[i])

new_phis = adjust_phi_interval(new_phis)

df_new = df.copy(deep=True)
df_new[Fs_new] = new_Fs
df_new[Fs_new] = df_new[Fs_new].astype("SFAmplitude")
df_new[phases_new] = new_phis
df_new[phases_new] = df_new[phases_new].astype("Phase")

return df_new


def map_from_Fs(dataset, Fs, phis, map_res):

"""
Return a GEMMI CCP4 map object from an rs.Dataset object
Parameters :
dataset : rs.Dataset of interest
Fs, phis : (str) and (str) labels for amplitudes and phases to be used
map_res : (float) to determine map spacing resolution
"""

mtz = dataset.to_gemmi()
ccp4 = gm.Ccp4Map()
ccp4.grid = mtz.transform_f_phi_to_map('{}'.format(Fs), '{}'.format(phis), sample_rate=map_res)
ccp4.grid = mtz.transform_f_phi_to_map(
"{}".format(Fs), "{}".format(phis), sample_rate=map_res
)
ccp4.update_ccp4_header(2, True)

return ccp4

return ccp4
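
As a point of reference for the dsutils.py changes above, a minimal sketch of the phase-interval wrapping in adjust_phi_interval and the shell averaging in resolution_shells; the sample arrays and bin count are illustrative assumptions, not part of this commit:

import numpy as np
from scipy.stats import binned_statistic

# Phase wrapping into -180 <= phi <= 180, as in adjust_phi_interval
phi = np.array([350.0, 190.0, -270.0, 45.0])
phi = phi % 360          # -> [350., 190., 90., 45.]
phi[phi > 180] -= 360    # -> [-10., -170., 90., 45.]

# Shell averaging as in resolution_shells: mean of data in n bins of dHKL
dhkl = np.linspace(1.5, 10.0, 500)
data = np.exp(-1.0 / dhkl)
mean_data = binned_statistic(
    dhkl, data, statistic="mean", bins=10, range=(np.min(dhkl), np.max(dhkl))
)
bin_centers = (mean_data.bin_edges[:-1] + mean_data.bin_edges[1:]) / 2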
6 changes: 0 additions & 6 deletions meteor/maps.py
@@ -1,8 +1,5 @@
import numpy as np
import gemmi as gm
from tqdm import tqdm
from meteor import dsutils, validate, mask
from scipy.stats import kurtosis

from . import scale
from . import io
@@ -105,6 +102,3 @@ def find_w_diffs(mtz, Fon, Foff, SIGon, SIGoff, pdb, high_res, path, a, Nbg=1.00
mtz.infer_mtz_dtypes(inplace=True)

return mtz, ws



1 change: 0 additions & 1 deletion meteor/meteor_io.py
@@ -168,7 +168,6 @@ def get_pdbinfo(pdb):
with open(pdb, "r") as f:
for line in f:
if line.startswith("CRYST1"):

split_line = line.strip().split()
unit_cell = [float(i) for i in split_line[1:7]]
space_group = "".join(split_line[7:11])
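
For the CRYST1 parsing touched above, a small sketch of what get_pdbinfo extracts from a PDB header record; the example line is a generic illustration, not taken from this repository:

# Example CRYST1 record: six cell parameters in fields 2-7, space group in fields 8-11
line = "CRYST1   51.990   62.910   72.030  90.00  90.00  90.00 P 21 21 21    4"
split_line = line.strip().split()
unit_cell = [float(i) for i in split_line[1:7]]   # [51.99, 62.91, 72.03, 90.0, 90.0, 90.0]
space_group = "".join(split_line[7:11])           # "P212121"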
91 changes: 53 additions & 38 deletions meteor/scale.py
@@ -1,9 +1,8 @@

import numpy as np
import scipy.optimize as opt

def scale_iso(data1, data2, ds):

def scale_iso(data1, data2, ds):
"""
Isotropic resolution-dependent scaling of data2 to data1.
(minimize [dataset1 - c*exp(-B*sintheta**2/lambda**2)*dataset2]
@@ -22,64 +21,80 @@ def scale_iso(data1, data2, ds):
2. scaled dataset2 in the form of a 1D numpy array
"""

def scale_func(p, x1, x2, qs):
return x1 - (p[0]*np.exp(-p[1]*(qs**2)))*x2
return x1 - (p[0] * np.exp(-p[1] * (qs**2))) * x2

p0 = np.array([1.0, -20])
qs = 1/(2*ds)
qs = 1 / (2 * ds)
matrix = opt.least_squares(scale_func, p0, args=(data1, data2, qs))

return matrix.x[0], matrix.x[1], (matrix.x[0]*np.exp(-matrix.x[1]*(qs**2)))*data2

return (
matrix.x[0],
matrix.x[1],
(matrix.x[0] * np.exp(-matrix.x[1] * (qs**2))) * data2,
)

def scale_aniso(x_dataset, y_dataset, Miller_indx):

""""
def scale_aniso(x_dataset, y_dataset, Miller_indx):
""" "
Author: Virginia Apostolopoulou
Anisotropically scales y_dataset to x_dataset given an ndarray of Miller indices.
"""

p0 = np.array([1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
matrix_ani = opt.least_squares(aniso_scale_func, p0, args=(x_dataset, y_dataset, Miller_indx))
matrix_ani = opt.least_squares(
aniso_scale_func, p0, args=(x_dataset, y_dataset, Miller_indx)
)

h = Miller_indx[:,0]
k = Miller_indx[:,1]
l = Miller_indx[:,2]
h = Miller_indx[:, 0]
k = Miller_indx[:, 1]
l = Miller_indx[:, 2]

Check failure annotation (GitHub Actions / build (3.11), Ruff E741): meteor/scale.py:52:5 Ambiguous variable name: `l`
h_sq = np.square(h)
k_sq = np.square(k)
l_sq = np.square(l)

hk_prod = h*k
hl_prod = h*l
kl_prod = k*l
hk_prod = h * k
hl_prod = h * l
kl_prod = k * l

t = - (h_sq * matrix_ani.x[1] + k_sq * matrix_ani.x[2] + l_sq * matrix_ani.x[3]
+ 2*hk_prod * matrix_ani.x[4] + 2*hl_prod * matrix_ani.x[5] + 2*kl_prod * matrix_ani.x[6])
t = -(
h_sq * matrix_ani.x[1]
+ k_sq * matrix_ani.x[2]
+ l_sq * matrix_ani.x[3]
+ 2 * hk_prod * matrix_ani.x[4]
+ 2 * hl_prod * matrix_ani.x[5]
+ 2 * kl_prod * matrix_ani.x[6]
)

data_ani_scaled = (matrix_ani.x[0]*np.exp(t))*y_dataset

return matrix_ani, t, data_ani_scaled
data_ani_scaled = (matrix_ani.x[0] * np.exp(t)) * y_dataset

return matrix_ani, t, data_ani_scaled

def aniso_scale_func(p, x1, x2, H_arr):

def aniso_scale_func(p, x1, x2, H_arr):
"Author: Virginia Apostolopoulou"
h = H_arr[:,0]
k = H_arr[:,1]
l = H_arr[:,2]

h = H_arr[:, 0]
k = H_arr[:, 1]
l = H_arr[:, 2]

Check failure annotation (GitHub Actions / build (3.11), Ruff E741): meteor/scale.py:80:5 Ambiguous variable name: `l`

h_sq = np.square(h)
k_sq = np.square(k)
l_sq = np.square(l)

hk_prod = h*k
hl_prod = h*l
kl_prod = k*l

t = - (h_sq * p[1] + k_sq * p[2] + l_sq * p[3] +
2*hk_prod * p[4] + 2*hl_prod * p[5] + 2*kl_prod * p[6])
expnt = np.exp( t )
r = x1 - p[0] * expnt * x2
return r

hk_prod = h * k
hl_prod = h * l
kl_prod = k * l

t = -(
h_sq * p[1]
+ k_sq * p[2]
+ l_sq * p[3]
+ 2 * hk_prod * p[4]
+ 2 * hl_prod * p[5]
+ 2 * kl_prod * p[6]
)
expnt = np.exp(t)
r = x1 - p[0] * expnt * x2
return r
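
For context on the scale.py hunks above, a self-contained sketch of the isotropic model that scale_iso fits (data1 is modeled as C * exp(-B * q**2) * data2 with q = 1/(2d)); the synthetic arrays and the true values C = 0.8, B = 5.0 are illustrative assumptions:

import numpy as np
import scipy.optimize as opt

d = np.linspace(1.5, 10.0, 200)            # resolutions in Angstrom, illustrative
q = 1 / (2 * d)
rng = np.random.default_rng(0)
data2 = rng.uniform(1.0, 10.0, d.size)
data1 = 0.8 * np.exp(-5.0 * q**2) * data2  # synthetic "target" dataset

def residual(p, x1, x2, qs):
    # Same residual form as scale_func inside scale_iso
    return x1 - (p[0] * np.exp(-p[1] * (qs**2))) * x2

fit = opt.least_squares(residual, np.array([1.0, -20.0]), args=(data1, data2, q))
c, b = fit.x                               # should recover roughly (0.8, 5.0)
data2_scaled = (c * np.exp(-b * q**2)) * data2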
20 changes: 5 additions & 15 deletions meteor/tv.py
@@ -10,12 +10,10 @@ def tv_denoise_single_pass(mtz):
def find_TV_reg_lambda(
mtz,
Flabel,

):

print("Scanning TV weights")
lambdas = np.linspace(1e-8, 0.4, 100)

entropies = []
amp_changes = []
phase_changes = []
@@ -25,12 +23,12 @@ def find_TV_reg_lambda(
fit_TV_map, entropy = TV_filter(
fit_map, l, fit_map.grid.shape, cell, space_group

Check failure annotation (GitHub Actions / build (3.11), Ruff F821): meteor/tv.py:24:45 Undefined name `cell`
Check failure annotation (GitHub Actions / build (3.11), Ruff F821): meteor/tv.py:24:51 Undefined name `space_group`
)
Fs_TV = dsutils.from_gemmi(io.map2mtz(fit_TV_map, highres))
dsutils.from_gemmi(io.map2mtz(fit_TV_map, highres))

Check failure annotation (GitHub Actions / build (3.11), Ruff F821): meteor/tv.py:26:9 Undefined name `dsutils`

entropies.append(entropy)
amp_changes.append(amp_change)
phase_changes.append(phase_change)

entropies = np.array(entropies)
best_entropy = np.max(entropies)
lambda_best_entr = lambdas[np.argmax(entropies)]
@@ -40,14 +38,11 @@
TVmap_best_entr, _ = TV_filter(
fit_map, lambda_best_entr, fit_map.grid.shape, cell, space_group
)

return TVmap_best_entr,


return (TVmap_best_entr,)


def TV_filter(map, l, grid_size, cell, space_group):

"""
Apply TV filtering to a Gemmi map object. Compute negentropy for denoised array.
@@ -63,8 +58,6 @@ def TV_filter(map, l, grid_size, cell, space_group):
Denoised map (GEMMI object) and associated negentropy (float)
"""



TV_arr = denoise_tv_chambolle(
np.array(map.grid), eps=0.00000005, weight=l, max_num_iter=50
)
@@ -111,7 +104,6 @@ def TV_iteration(
"""


return new_amps, new_phases, test_proj_error, entropy, phase_change, z


@@ -143,5 +135,3 @@ def TV_projection(Foff, Fon, phi_calc, F_plus, phi_plus, ws):
proj_error = np.absolute(np.absolute(z) - Fon)

return new_amps, new_phases, proj_error, np.angle(z, deg=True)
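
The TV_filter changes above ultimately delegate to skimage's Chambolle total-variation denoiser; a minimal, runnable sketch on a synthetic 3D array (the array, noise level, and weight are illustrative assumptions, with weight playing the role of the regularization parameter l):

import numpy as np
from skimage.restoration import denoise_tv_chambolle

rng = np.random.default_rng(0)
volume = np.zeros((32, 32, 32))
volume[8:24, 8:24, 8:24] = 1.0             # a blocky "feature"
noisy = volume + rng.normal(scale=0.2, size=volume.shape)

# Same call pattern as TV_filter: weight = l, small eps, capped iteration count
denoised = denoise_tv_chambolle(noisy, weight=0.05, eps=5e-8, max_num_iter=50)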


10 changes: 8 additions & 2 deletions meteor/validate.py
@@ -14,8 +14,14 @@ def negentropy(samples: np.ndarray, tolerance: float = 1e-4) -> float:
"""

std = np.std(samples.flatten())
neg_e = 0.5 * np.log(2.0 * np.pi * std**2) + 0.5 - differential_entropy(samples.flatten())
neg_e = (
0.5 * np.log(2.0 * np.pi * std**2)
+ 0.5
- differential_entropy(samples.flatten())
)
if not neg_e >= -tolerance:
raise ValueError(f"negentropy is a relatively big negative number {neg_e} that exceeds the tolerance {tolerance} -- something may have gone wrong")
raise ValueError(
f"negentropy is a relatively big negative number {neg_e} that exceeds the tolerance {tolerance} -- something may have gone wrong"
)

return neg_e
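
The negentropy check reformatted above computes the entropy gap to a Gaussian of the same variance (0.5 * log(2 * pi * sigma**2) + 0.5 is the differential entropy of that Gaussian), so the result is near zero for Gaussian samples and positive otherwise. A minimal sketch under that reading, with the sample size chosen only for illustration:

import numpy as np
from scipy.stats import differential_entropy

rng = np.random.default_rng(0)
samples = rng.normal(size=100_000)
std = np.std(samples)
neg_e = 0.5 * np.log(2.0 * np.pi * std**2) + 0.5 - differential_entropy(samples)
# neg_e should sit near zero (within sampling error) for Gaussian input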
