Commit

Applied flake8 suggestions and black formatting in several files
mrosskopf committed Aug 27, 2024
1 parent 8943953 commit 3ab60d2
Showing 21 changed files with 328 additions and 277 deletions.
2 changes: 1 addition & 1 deletion .flake8
@@ -1,4 +1,4 @@
[flake8]
max-line-length = 120
exclude = dug_seis/graphical_interface/ui_files
extend-ignore = E203
extend-ignore = E203, F841, F401, F821
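
For context: E203 (whitespace before ':') clashes with the slice spacing black produces, while F401, F841 and F821 are pyflakes findings for unused imports, unused local variables and undefined names. A deliberately faulty sketch, not taken from the repository, of what each newly ignored code would otherwise report:

    import os                           # F401: imported but unused

    def lint_examples(values, lo, hi):
        unused_total = sum(values)      # F841: local variable assigned but never used
        print(undefined_name)           # F821: undefined name (deliberate, for illustration)
        return values[lo + 1 : hi - 1]  # E203: whitespace before ':' (black's slice style)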
277 changes: 145 additions & 132 deletions dug_seis/db/sqlite_db_backend.py

Large diffs are not rendered by default.

16 changes: 10 additions & 6 deletions dug_seis/event_processing/detection/coincidence_trigger.py
@@ -43,7 +43,7 @@ def coincidence_trigger(
details=False,
event_templates={},
similarity_threshold=0.7,
**options
**options,
):
"""
Perform a network coincidence trigger.
@@ -187,11 +187,15 @@ def util_val_of_scalar_or_list(elem, idx):
else:
kernel_size = 2000
kernel = np.ones(kernel_size) / kernel_size
data_filtered = np.convolve(tr.data, kernel, mode='same')
time = np.arange(0, len(data_filtered)) * 1 / st.traces[0].stats.sampling_rate
data_filtered = np.convolve(tr.data, kernel, mode="same")
time = (
np.arange(0, len(data_filtered)) * 1 / st.traces[0].stats.sampling_rate
)
data_filtered_dif = np.gradient(data_filtered) / np.gradient(time)
data_filtered_dif_filtered = np.convolve(data_filtered_dif, kernel, mode='same')
tr.data = -1 * data_filtered_dif_filtered/1000
data_filtered_dif_filtered = np.convolve(
data_filtered_dif, kernel, mode="same"
)
tr.data = -1 * data_filtered_dif_filtered / 1000
# end of adjustments
kwargs["max_len"] = int(max_trigger_length * tr.stats.sampling_rate + 0.5)

@@ -202,7 +206,7 @@ def util_val_of_scalar_or_list(elem, idx):
tr.data,
util_val_of_scalar_or_list(thr_on, idx),
util_val_of_scalar_or_list(thr_off, idx),
**kwargs
**kwargs,
)
# end of adjustments
for on, off in tmp_triggers:
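
The reformatted lines above smooth the trace with a moving average and then take its time derivative before triggering. A minimal sketch of that preprocessing on a plain NumPy array; `data`, `sampling_rate` and the function name are chosen here for illustration:

    import numpy as np

    def smooth_and_differentiate(data, sampling_rate, kernel_size=2000):
        # boxcar smoothing of the raw trace
        kernel = np.ones(kernel_size) / kernel_size
        data_filtered = np.convolve(data, kernel, mode="same")
        # time axis in seconds, then first derivative of the smoothed trace
        time = np.arange(len(data_filtered)) / sampling_rate
        derivative = np.gradient(data_filtered) / np.gradient(time)
        # smooth the derivative again and rescale as in the diff
        return -1 * np.convolve(derivative, kernel, mode="same") / 1000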
89 changes: 54 additions & 35 deletions dug_seis/event_processing/magnitudes/amplitude_based_magnitudes.py
@@ -40,16 +40,16 @@ def is_time_between(begin_time, end_time, check_time):

def amplitude_based_relative_magnitude(st_event, event):
# main parameters for magnitude processing
V_S = 3100. # [m/s]
V_S = 3100.0 # [m/s]
filter_freq_min = 3e3 # [Hz]
filter_freq_max = 12e3 # [Hz]
filter_corners = 4
filter_zerophase = "false"
signal_copy = st_event.copy()
noise_copy = st_event.copy()
gainLogAE = 100 # 10dB + 30dB
gainAE = 1. / gainLogAE
Count2VoltAE = 10. / 32000 # 10V on 32000 samples
gainAE = 1.0 / gainLogAE
Count2VoltAE = 10.0 / 32000 # 10V on 32000 samples
conversion_factor_counts_mV = gainAE * Count2VoltAE # assuming 64'000 (not 2**16 = 65536) counts
# resolution per +/-10V (direct communication by GMuG) as well as 30dB pre-amplification
# and 10 dB amplification from the supply/filter unit
@@ -58,23 +58,31 @@ def amplitude_based_relative_magnitude(st_event, event):
Q = 76.0 # Quality factor [] introduced by Hansruedi Maurer (Email 29.07.2021)
V_P = 5100.0 # P-wave velocity [m/s]
r_0 = 10.0 # reference distance [m]
### For amplitude
# For amplitude
p_amp = []
n_amp = []
t_window = []
distances = []
### For magnitude
# For magnitude
s_m = []
Mr_station = []

count = 0
tmpPicks = event.picks
if len(tmpPicks)>len(event.preferred_origin().arrivals):
idx = [i for i,x in enumerate(tmpPicks) if x.evaluation_mode=='manual']
if len(tmpPicks) > len(event.preferred_origin().arrivals):
idx = [i for i, x in enumerate(tmpPicks) if x.evaluation_mode == "manual"]
PicksManual = [tmpPicks[x].waveform_id for x in idx]
idx2 = [i for i,x in enumerate(tmpPicks) if x.waveform_id in PicksManual and x.evaluation_mode=='automatic']
idx2 = [
i
for i, x in enumerate(tmpPicks)
if x.waveform_id in PicksManual and x.evaluation_mode == "automatic"
]
PickDouble = [tmpPicks[x].waveform_id for x in idx2]
Picks = [x for x in tmpPicks if x.waveform_id not in PickDouble or x.evaluation_mode=='manual']
Picks = [
x
for x in tmpPicks
if x.waveform_id not in PickDouble or x.evaluation_mode == "manual"
]
else:
Picks = tmpPicks

@@ -85,7 +93,7 @@ def amplitude_based_relative_magnitude(st_event, event):
delta_p_s = dist / V_S - dist / V_P
signal_window_start_time = pick.time - 0.5 * delta_p_s
signal_window_end_time = pick.time + delta_p_s
noise_window_start_time = pick.time - 2.5*delta_p_s
noise_window_start_time = pick.time - 2.5 * delta_p_s
noise_window_end_time = pick.time - 0.5 * delta_p_s

# skip magnitude computation if
@@ -115,7 +123,7 @@ def amplitude_based_relative_magnitude(st_event, event):
noise_window_end_time,
)
):
#print('Possible error')
# print('Possible error')
continue

distances.append(
@@ -162,56 +170,67 @@ def amplitude_based_relative_magnitude(st_event, event):
np.abs(noise.data), 95
) # take 95 % percentile to omit outliers
n_amp.append(noise_95pers * conversion_factor_counts_mV)
if n_amp[count]!=0:
if n_amp[count] != 0:
SNR = p_amp[count] / n_amp[count]
else:
SNR = 0

event.amplitudes.append(
Amplitude(resource_id=f"amplitude/p_wave/{uuid.uuid4()}",
generic_amplitude=p_amp[count],
type='AMB',
unit='other',
snr=SNR,
waveform_id=WaveformStreamID(network_code=pick.waveform_id.network_code,
station_code=pick.waveform_id.station_code,
location_code=pick.waveform_id.location_code,
channel_code=pick.waveform_id.channel_code),
time_window=TimeWindow(begin=t_window[count].begin, end=t_window[count].end,
reference=t_window[count].reference)))

Amplitude(
resource_id=f"amplitude/p_wave/{uuid.uuid4()}",
generic_amplitude=p_amp[count],
type="AMB",
unit="other",
snr=SNR,
waveform_id=WaveformStreamID(
network_code=pick.waveform_id.network_code,
station_code=pick.waveform_id.station_code,
location_code=pick.waveform_id.location_code,
channel_code=pick.waveform_id.channel_code,
),
time_window=TimeWindow(
begin=t_window[count].begin,
end=t_window[count].end,
reference=t_window[count].reference,
),
)
)

corr_fac_1 = np.exp(np.pi * (dist - r_0) * f_0 / (Q * V_P))
# correction for geometrical spreading
corr_fac_2 = dist / r_0
# station magnitude computation
if p_amp[count]==0:
if p_amp[count] == 0:
continue
tmpMrSta = np.log10(p_amp[count] * corr_fac_2 * corr_fac_1)
Mr_station.append(tmpMrSta)
# append station magnitude to event
event.station_magnitudes.append(
StationMagnitude(resource_id=f"station_magnitude/p_wave_magnitude/relative/{uuid.uuid4()}",
origin_id=event.preferred_origin_id.id,
mag= -2.25 + 0.66 * tmpMrSta,
station_magnitude_type='MwA',
amplitude_id=event.amplitudes[count].resource_id))
StationMagnitude(
resource_id=f"station_magnitude/p_wave_magnitude/relative/{uuid.uuid4()}",
origin_id=event.preferred_origin_id.id,
mag=-2.25 + 0.66 * tmpMrSta,
station_magnitude_type="MwA",
amplitude_id=event.amplitudes[count].resource_id,
)
)
# store station magnitude contribution
s_m.append(
StationMagnitudeContribution(
station_magnitude_id="smi:local/" + event.station_magnitudes[count].resource_id.id,
weight=1 / len(event.amplitudes)))
weight=1 / len(event.amplitudes),
)
)

count+=1
count += 1

if not event.amplitudes: # if no amplitudes are assigned return from the definition
delattr(event, 'amplitudes')
delattr(event, "amplitudes")
return event


Mr_station = np.array(Mr_station)
# Mr_network = np.log10(np.sqrt(np.sum((10**Mr_station)**2) / len(Mr_station))) # network magnitude
Mr_network = np.sum(Mr_station) / len(Mr_station) # network magnitude
Mr_network = np.sum(Mr_station) / len(Mr_station) # network magnitude
MA_network = -2.25 + 0.66 * Mr_network  # temporary relation deduced from VALTER Stimulation 1, using individual station estimations

# Create network magnitude and add station magnitude contribution
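
The magnitude block above corrects the picked P amplitude for attenuation and geometrical spreading before converting it to a station magnitude. A condensed sketch of that relation using the constants visible in the diff (Q, V_P, r_0); the dominant frequency f_0 is defined outside the lines shown, so the default below is only a placeholder:

    import numpy as np

    def station_magnitude(p_amp, dist, f_0=1e4, Q=76.0, V_P=5100.0, r_0=10.0):
        # attenuation correction (Q model) and geometrical spreading to the reference distance r_0
        corr_attenuation = np.exp(np.pi * (dist - r_0) * f_0 / (Q * V_P))
        corr_spreading = dist / r_0
        # relative magnitude, then the MwA scaling used in the diff
        Mr = np.log10(p_amp * corr_spreading * corr_attenuation)
        return -2.25 + 0.66 * Mr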
5 changes: 3 additions & 2 deletions dug_seis/event_processing/picking/dug_picker.py
@@ -272,8 +272,9 @@ def sta_lta(stream, st_window, lt_window, thresholds):
)
# calculate snr
try:
snr = max(abs(trace.data[trig[0][0] + 10:trig[0][0] + 100])) / max(
abs(trace.data[trig[0][0] - 100:trig[0][0] - 10]))
snr = max(abs(trace.data[trig[0][0] + 10 : trig[0][0] + 100])) / max(
abs(trace.data[trig[0][0] - 100 : trig[0][0] - 10])
)
except ValueError:
continue

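
The reformatted lines above estimate a signal-to-noise ratio from short windows just after and just before the trigger onset. A minimal sketch, with `data` as a NumPy array and `onset` as the trigger sample index (illustrative names):

    import numpy as np

    def trigger_snr(data, onset):
        signal = np.abs(data[onset + 10 : onset + 100])  # samples just after the onset
        noise = np.abs(data[onset - 100 : onset - 10])   # samples just before the onset
        return signal.max() / noise.max()                # raises ValueError on empty windows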
5 changes: 4 additions & 1 deletion dug_seis/event_processing/picking/picker_helper.py
@@ -19,6 +19,7 @@
"""
import numpy as np


def mask_list_indices(lst, indices):
"""
Function that masks list depending on indices.
@@ -47,7 +48,9 @@ def time_delta_between_picks(picks, thr_time_deltas):
# find time deltas below threshold
ind_low_time_delta = np.unique(np.where(time_deltas <= thr_time_deltas))
nr_low_time_deltas = len(ind_low_time_delta)
ind_high_time_delta = np.setdiff1d(np.arange(0, len(time_deltas), 1, dtype=int), ind_low_time_delta)
ind_high_time_delta = np.setdiff1d(
np.arange(0, len(time_deltas), 1, dtype=int), ind_low_time_delta
)
nr_high_time_deltas = len(ind_high_time_delta)
picks_masked = mask_list_indices(picks, ind_high_time_delta)
return picks_masked, nr_low_time_deltas, nr_high_time_deltas
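
The helper above separates indices whose time deltas fall below the threshold from the rest via np.setdiff1d. A small self-contained sketch of that split; the array values are made up for illustration:

    import numpy as np

    time_deltas = np.array([0.004, 0.5, 0.008, 0.8])  # seconds, illustrative values
    thr_time_deltas = 0.01
    ind_low = np.unique(np.where(time_deltas <= thr_time_deltas))
    ind_high = np.setdiff1d(np.arange(len(time_deltas), dtype=int), ind_low)
    # ind_low -> array([0, 2]), ind_high -> array([1, 3])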
2 changes: 1 addition & 1 deletion dug_seis/event_processing/picking/picker_virginie.py
@@ -281,4 +281,4 @@ def virginie_picker_per_trace(
method_id="FBKT",
phase_hint="P",
evaluation_mode="automatic",
)
)
(additional changed file; name not rendered in this view)
@@ -33,7 +33,6 @@ def __init__(
pol_coeff=10,
uncert_coeff=3,
):

"""
Parameter description:
t_ma : the time in seconds of the moving average window for dynamic threshold
(additional changed file; name not rendered in this view)
@@ -48,7 +48,6 @@ def __init__(
pol_coeff=10,
uncert_coeff=3,
):

"""
Parameter description:
t_long : the time in seconds of moving window to calculate CFn of each bandpass filtered data
(additional changed file; name not rendered in this view)
@@ -18,7 +18,6 @@


class KTPicker:

"""
KTPicker is designed based on kurtosis.
"""
@@ -35,7 +34,6 @@ def __init__(
pol_coeff=10,
uncert_coeff=3,
):

"""
Parameter description:
t_win : the time in seconds of moving window to calculate kurtosis
4 changes: 1 addition & 3 deletions dug_seis/graphical_interface/main.py
@@ -548,9 +548,7 @@ def _update_event_str(self, event):
)
if len(event.magnitudes) > 0:
for magnitude in event.magnitudes:
magnitude_str += (
f" Magnitude: {magnitude.mag} \n"
)
magnitude_str += f" Magnitude: {magnitude.mag} \n"

text_str = (
f"Event {event.resource_id}\n"
31 changes: 22 additions & 9 deletions dug_seis/plotting/plotting.py
@@ -17,24 +17,26 @@
"""
Script that contains various plotting functions.
"""

import numpy as np
import matplotlib.pyplot as plt
from obspy.signal.trigger import recursive_sta_lta


def nextpow2(N):
""" Function for finding the next power of 2 """
"""Function for finding the next power of 2"""
n = 1
while n < N: n *= 2
while n < N:
n *= 2
return n


def cm_to_inch(value):
return value/2.54
return value / 2.54


def amp_fft(signal, sampling_rate, pad=1, window=False, resample_log=False):
""" Function to get single sided fft"""
"""Function to get single sided fft"""
signal = signal - np.mean(signal) # detrend
hann = np.hanning(len(signal))
total_length_signal = nextpow2(len(signal) * pad)
@@ -44,15 +46,21 @@ def amp_fft(signal, sampling_rate, pad=1, window=False, resample_log=False):
elif window is False:
signal_fft = np.fft.fft(signal, n=total_length_signal)

signal_fft = signal_fft[0:int(total_length_signal / 2 + 1)]
signal_fft = signal_fft[0 : int(total_length_signal / 2 + 1)]
signal_fft = signal_fft / len(signal) # normalise
signal_fft[1:-1] = signal_fft[1:-1] * 2 # single sided, that is why times two
freq = np.arange(0, sampling_rate / 2 + sampling_rate / total_length_signal, sampling_rate / total_length_signal)
freq = np.arange(
0,
sampling_rate / 2 + sampling_rate / total_length_signal,
sampling_rate / total_length_signal,
)
res = freq[1:2][0]

if resample_log:
freq_int = np.logspace(0.1, 5, num=10000)
signal_fft_interp = np.interp(freq_int, freq, signal_fft, left=None, right=None, period=None)
signal_fft_interp = np.interp(
freq_int, freq, signal_fft, left=None, right=None, period=None
)
return signal_fft_interp, freq_int, res
else:
return signal_fft, freq, res
@@ -152,6 +160,7 @@ def plot_time_waveform(stream, markers="no"):
)
return fig


def plot_time_waveform_picks(stream, picks):
fig = plot_time_waveform(stream)
for index_pick, pick in enumerate(picks):
@@ -160,14 +169,18 @@ def plot_time_waveform_picks(stream, picks):
if pick_id == stream.traces[index_trace].id:
time = (pick.time - trace.stats.starttime) * 1000 # relative pick time in ms
fig.axes[index_trace].vlines(
time, fig.axes[index_trace].get_ylim()[0] * 0.5,
time,
fig.axes[index_trace].get_ylim()[0] * 0.5,
fig.axes[index_trace].get_ylim()[1] * 0.5,
color="r", linewidth=4)
color="r",
linewidth=4,
)
else:
continue
fig.set_size_inches(11.69, 8.27)
return fig


def plot_time_characteristic_function(stream, nsta, nlta):
# chose plotting style
plt.style.use("seaborn-bright")
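
amp_fft above builds a single-sided, normalised spectrum, optionally Hann-windowed and zero-padded to the next power of two. A stripped-down sketch of the same idea without padding or windowing; `signal` and `fs` are assumed inputs:

    import numpy as np

    def single_sided_spectrum(signal, fs):
        signal = signal - np.mean(signal)        # remove the mean ("detrend")
        n = 1
        while n < len(signal):                   # next power of two, as in nextpow2()
            n *= 2
        spec = np.fft.fft(signal, n=n)[: n // 2 + 1]
        spec = spec / len(signal)                # normalise by the original length
        spec[1:-1] *= 2                          # fold negative frequencies in
        freq = np.arange(n // 2 + 1) * fs / n    # frequency axis up to Nyquist
        return np.abs(spec), freq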
2 changes: 1 addition & 1 deletion dug_seis/project/project.py
@@ -371,7 +371,7 @@ def _load_waveforms(self):
# if extra_channels_in_meta_data:
# raise ValueError(msg)
# Otherwise only warn.
#elif extra_channels_in_data:
# elif extra_channels_in_data:
# logger.warn(msg)

self.__waveform_handler = wh
(remaining changed files not rendered in this view)
