From b25f387d519a503ba943c268e2a9cf984d1e491b Mon Sep 17 00:00:00 2001 From: theOehrly <23384863+theOehrly@users.noreply.github.com> Date: Thu, 21 Dec 2023 17:04:00 +0100 Subject: [PATCH] MNT: fix line length in all relevant files --- conftest.py | 34 +- examples/plot_gear_shifts_on_track.py | 3 +- examples/plot_qualifying_results.py | 7 +- examples/plot_speed_on_track.py | 9 +- fastf1/core.py | 596 ++++++++++++++++---------- fastf1/ergast/structure.py | 6 +- fastf1/events.py | 7 +- fastf1/livetiming/__init__.py | 39 +- fastf1/livetiming/client.py | 3 +- fastf1/livetiming/data.py | 3 +- fastf1/plotting.py | 27 +- fastf1/utils.py | 18 +- 12 files changed, 475 insertions(+), 277 deletions(-) diff --git a/conftest.py b/conftest.py index 573982172..6c8599204 100644 --- a/conftest.py +++ b/conftest.py @@ -22,22 +22,29 @@ def pytest_addoption(parser): ) parser.addoption( "--slow", action="store_true", default=False, - help="run very slow tests too: this may take 30 minutes or more and will may multiple" - "hundred requests to the api server - usage is highly discouraged" + help="run very slow tests too: this may take 30 minutes or more and " + "will make multiple hundred requests to the api server - usage " + "is highly discouraged" ) def pytest_configure(config): - config.addinivalue_line("markers", "f1telapi: test connects to the f1 telemetry api") - config.addinivalue_line("markers", "ergastapi: test connects to the ergast api") - config.addinivalue_line("markers", "prjdoc: general non-code tests for project and structure") - config.addinivalue_line("markers", "slow: extremely slow tests (multiple minutes)") + config.addinivalue_line("markers", + "f1telapi: test connects to the f1 telemetry api") + config.addinivalue_line("markers", + "ergastapi: test connects to the ergast api") + config.addinivalue_line("markers", + "prjdoc: general non-code tests for project and " + "structure") + config.addinivalue_line("markers", + "slow: extremely slow tests (multiple minutes)") 
def pytest_collection_modifyitems(config, items): # cli conditional skip extremely slow tests if not config.getoption("--slow"): - skip_slow = pytest.mark.skip(reason="need --slow option to run; usage highly discouraged") + skip_slow = pytest.mark.skip(reason="need --slow option to run; " + "usage highly discouraged") for item in items: if "slow" in item.keywords: item.add_marker(skip_slow) @@ -52,23 +59,28 @@ def pytest_collection_modifyitems(config, items): # cli conditional skip test that connect to the ergast api if not config.getoption("--ergast-api"): - skip_ergast = pytest.mark.skip(reason="need --ergast-api option to run") + skip_ergast = pytest.mark.skip(reason="need --ergast-api option to " + "run") for item in items: if "ergastapi" in item.keywords: item.add_marker(skip_ergast) # lint only: skip all if config.getoption('--lint-only'): - items[:] = [item for item in items if item.get_closest_marker('flake8')] + items[:] = [item for item in items + if item.get_closest_marker('flake8')] # only test documentation and project structure if config.getoption('--prj-doc'): - skip_non_prj = pytest.mark.skip(reason="--prj-doc given: run only project structure and documentation tests") + skip_non_prj = pytest.mark.skip(reason="--prj-doc given: run only " + "project structure and " + "documentation tests") for item in items: if "prjdoc" not in item.keywords: item.add_marker(skip_non_prj) else: - skip_prj = pytest.mark.skip(reason="need --prj-doc to run project structure and documentation tests") + skip_prj = pytest.mark.skip(reason="need --prj-doc to run project " + "structure and documentation tests") for item in items: if "prjdoc" in item.keywords: item.add_marker(skip_prj) diff --git a/examples/plot_gear_shifts_on_track.py b/examples/plot_gear_shifts_on_track.py index ac61173e0..8d6002608 100644 --- a/examples/plot_gear_shifts_on_track.py +++ b/examples/plot_gear_shifts_on_track.py @@ -61,7 +61,8 @@ # Add a colorbar to the plot. 
Shift the colorbar ticks by +0.5 so that they # are centered for each color segment. -cbar = plt.colorbar(mappable=lc_comp, label="Gear", boundaries=np.arange(1, 10)) +cbar = plt.colorbar(mappable=lc_comp, label="Gear", + boundaries=np.arange(1, 10)) cbar.set_ticks(np.arange(1.5, 9.5)) cbar.set_ticklabels(np.arange(1, 9)) diff --git a/examples/plot_qualifying_results.py b/examples/plot_qualifying_results.py index 80981a744..f466a8c7c 100644 --- a/examples/plot_qualifying_results.py +++ b/examples/plot_qualifying_results.py @@ -15,7 +15,8 @@ # we only want support for timedelta plotting in this example -fastf1.plotting.setup_mpl(mpl_timedelta_support=True, color_scheme=None, misc_mpl_mods=False) +fastf1.plotting.setup_mpl(mpl_timedelta_support=True, color_scheme=None, + misc_mpl_mods=False) session = fastf1.get_session(2021, 'Spanish Grand Prix', 'Q') session.load() @@ -37,7 +38,9 @@ for drv in drivers: drvs_fastest_lap = session.laps.pick_driver(drv).pick_fastest() list_fastest_laps.append(drvs_fastest_lap) -fastest_laps = Laps(list_fastest_laps).sort_values(by='LapTime').reset_index(drop=True) +fastest_laps = Laps(list_fastest_laps) \ + .sort_values(by='LapTime') \ + .reset_index(drop=True) ############################################################################## diff --git a/examples/plot_speed_on_track.py b/examples/plot_speed_on_track.py index 3784a1485..cbef8576c 100644 --- a/examples/plot_speed_on_track.py +++ b/examples/plot_speed_on_track.py @@ -57,11 +57,13 @@ # After this, we plot the data itself. 
# Create background track line -ax.plot(lap.telemetry['X'], lap.telemetry['Y'], color='black', linestyle='-', linewidth=16, zorder=0) +ax.plot(lap.telemetry['X'], lap.telemetry['Y'], + color='black', linestyle='-', linewidth=16, zorder=0) # Create a continuous norm to map from data points to colors norm = plt.Normalize(color.min(), color.max()) -lc = LineCollection(segments, cmap=colormap, norm=norm, linestyle='-', linewidth=5) +lc = LineCollection(segments, cmap=colormap, norm=norm, + linestyle='-', linewidth=5) # Set the values used for colormapping lc.set_array(color) @@ -73,7 +75,8 @@ # Finally, we create a color bar as a legend. cbaxes = fig.add_axes([0.25, 0.05, 0.5, 0.05]) normlegend = mpl.colors.Normalize(vmin=color.min(), vmax=color.max()) -legend = mpl.colorbar.ColorbarBase(cbaxes, norm=normlegend, cmap=colormap, orientation="horizontal") +legend = mpl.colorbar.ColorbarBase(cbaxes, norm=normlegend, cmap=colormap, + orientation="horizontal") # Show the plot diff --git a/fastf1/core.py b/fastf1/core.py index bb9867a59..eab4846c3 100644 --- a/fastf1/core.py +++ b/fastf1/core.py @@ -52,6 +52,7 @@ import fastf1 from fastf1 import _api as api from fastf1 import ergast +from fastf1.livetiming.data import LiveTimingData from fastf1.mvapi import get_circuit_info, CircuitInfo from fastf1.logger import get_logger, soft_exceptions from fastf1.utils import to_timedelta @@ -166,15 +167,16 @@ class Telemetry(pd.DataFrame): 'Y': {'type': 'continuous', 'missing': 'quadratic'}, 'Z': {'type': 'continuous', 'missing': 'quadratic'}, 'Status': {'type': 'discrete'}, - 'Speed': {'type': 'continuous', 'missing': 'linear'}, # linear is often required as quadratic overshoots - 'RPM': {'type': 'continuous', 'missing': 'linear'}, # on sudden changes like sudden pedal application) + 'Speed': {'type': 'continuous', 'missing': 'linear'}, + 'RPM': {'type': 'continuous', 'missing': 'linear'}, 'Throttle': {'type': 'continuous', 'missing': 'linear'}, + # linear is often required as 
quadratic overshoots on sudden changes 'Brake': {'type': 'discrete'}, 'DRS': {'type': 'discrete'}, 'nGear': {'type': 'discrete'}, - 'Source': {'type': 'excluded'}, # special case, custom handling - 'Date': {'type': 'excluded'}, # special case, used as the index during resampling - 'Time': {'type': 'excluded'}, # special case, Time/SessionTime recalculated from 'Date' + 'Source': {'type': 'excluded'}, # special, custom handling + 'Date': {'type': 'excluded'}, # special, used as index when resampling + 'Time': {'type': 'excluded'}, # special, recalculated from 'Date' 'SessionTime': {'type': 'excluded'}, 'Distance': {'type': 'continuous', 'missing': 'quadratic'}, 'RelativeDistance': {'type': 'continuous', 'missing': 'quadratic'}, @@ -217,7 +219,8 @@ def _new(*args, **kwargs): @property def base_class_view(self): - """For a nicer debugging experience; can view DataFrame through this property in various IDEs""" + """For a nicer debugging experience; can view DataFrame through this + property in various IDEs""" return pd.DataFrame(self) def join(self, *args, **kwargs): @@ -289,9 +292,10 @@ def slice_by_lap( .. note:: Self needs to contain a 'SessionTime' column. - .. note:: When slicing with an instance of :class:`Laps` as a reference, the data will be sliced by first and - last lap. Missing laps in between will not be considered and data for these will still be included in - the sliced result. + .. note:: When slicing with an instance of :class:`Laps` as a + reference, the data will be sliced by first and last lap. Missing + laps in between will not be considered and data for these will + still be included in the sliced result. Args: ref_laps: The lap/laps by which to slice self @@ -303,25 +307,30 @@ def slice_by_lap( """ if isinstance(ref_laps, Laps) and len(ref_laps) > 1: if 'DriverNumber' not in ref_laps.columns: - ValueError("Laps is missing 'DriverNumber'. Cannot return telemetry for unknown driver.") + ValueError("Laps is missing 'DriverNumber'. 
Cannot return " + "telemetry for unknown driver.") if not len(ref_laps['DriverNumber'].unique()) <= 1: - raise ValueError("Cannot create telemetry for multiple drivers at once!") + raise ValueError("Cannot create telemetry for multiple " + "drivers at once!") end_time = ref_laps['Time'].max() start_time = ref_laps['LapStartTime'].min() elif isinstance(ref_laps, (Lap, Laps)): if isinstance(ref_laps, Laps): # one lap in Laps - ref_laps = ref_laps.iloc[0] # needs to be handled as a single lap + ref_laps = ref_laps.iloc[0] # handle as a single lap if 'DriverNumber' not in ref_laps.index: - ValueError("Lap is missing 'DriverNumber'. Cannot return telemetry for unknown driver.") + ValueError("Lap is missing 'DriverNumber'. Cannot return " + "telemetry for unknown driver.") end_time = ref_laps['Time'] start_time = ref_laps['LapStartTime'] else: - raise TypeError("Attribute 'ref_laps' needs to be an instance of `Lap` or `Laps`") + raise TypeError("Attribute 'ref_laps' needs to be an instance of " + "`Lap` or `Laps`") - return self.slice_by_time(start_time, end_time, pad, pad_side, interpolate_edges) + return self.slice_by_time(start_time, end_time, pad, pad_side, + interpolate_edges) def slice_by_time( self, @@ -333,7 +342,8 @@ def slice_by_time( ) -> "Telemetry": """Slice self to only include data in a specific time frame. - .. note:: Self needs to contain a 'SessionTime' column. Slicing by time use the 'SessionTime' as its reference. + .. note:: Self needs to contain a 'SessionTime' column. Slicing by + time use the 'SessionTime' as its reference. 
Args: start_time: Start of the section @@ -349,20 +359,24 @@ def slice_by_time( """ if interpolate_edges: edges = Telemetry({'SessionTime': (start_time, end_time), - 'Date': (start_time + self.session.t0_date, end_time + self.session.t0_date)}, + 'Date': (start_time + self.session.t0_date, + end_time + self.session.t0_date) + }, session=self.session).__finalize__(self) d = self.merge_channels(edges) else: d = self.copy() # TODO no copy? - sel = ((d['SessionTime'] <= end_time) & (d['SessionTime'] >= start_time)) + sel = ((d['SessionTime'] <= end_time) + & (d['SessionTime'] >= start_time)) if np.any(sel): data_slice = d.slice_by_mask(sel, pad, pad_side) if 'Time' in data_slice.columns: # shift time to 0 so laps can overlap - data_slice.loc[:, 'Time'] = data_slice['SessionTime'] - start_time + data_slice.loc[:, 'Time'] \ + = data_slice['SessionTime'] - start_time return data_slice return Telemetry().__finalize__(self) @@ -374,35 +388,45 @@ def merge_channels( ): """Merge telemetry objects containing different telemetry channels. - The two objects don't need to have a common time base. The data will be merged, optionally resampled and - missing values will be interpolated. + The two objects don't need to have a common time base. The data will + be merged, optionally resampled and missing values will be + interpolated. - :attr:`Telemetry.TELEMETRY_FREQUENCY` determines if and how the data is resampled. This can be overridden using - the `frequency` keyword for this method. + :attr:`Telemetry.TELEMETRY_FREQUENCY` determines if and how the data + is resampled. This can be overridden using the `frequency` keyword for + this method. Merging and resampling: - If the frequency is 'original', data will not be resampled. The two objects will be merged and all - timestamps of both objects are kept. Values will be interpolated so that all telemetry channels contain - valid data for all timestamps. This is the default and recommended option. 
+ If the frequency is 'original', data will not be resampled. The + two objects will be merged and all timestamps of both objects are + kept. Values will be interpolated so that all telemetry channels + contain valid data for all timestamps. This is the default and + recommended option. - If the frequency is specified as an integer in Hz the data will be merged as before. After that, the merged - time base will be resampled from the first value on at the specified frequency. Afterwards, the data will - be interpolated to fit the new time base. This means that usually most if not all values of the data will - be interpolated values. This is detrimental for overall accuracy. + If the frequency is specified as an integer in Hz the data will be + merged as before. After that, the merged time base will be + resampled from the first value on at the specified frequency. + Afterward, the data will be interpolated to fit the new time base. + This means that usually most if not all values of the data will be + interpolated values. This is detrimental for overall accuracy. Interpolation: - Missing values after merging will be interpolated for all known telemetry channels using - :meth:`fill_missing`. Different interpolation methods are used depending on what kind of data the channel - contains. For example, forward fill is used to interpolated 'nGear' while linear interpolation is used - for 'RPM' interpolation. + Missing values after merging will be interpolated for all known + telemetry channels using :meth:`fill_missing`. Different + interpolation methods are used depending on what kind of data the + channel contains. For example, forward fill is used to + interpolated 'nGear' while linear interpolation is used for 'RPM' + interpolation. - .. note :: Unknown telemetry channels will be merged but missing values will not be interpolated. This can - either be done manually or a custom telemetry channel can be added using :meth:`register_new_channel`. + .. 
note :: Unknown telemetry channels will be merged but missing + values will not be interpolated. This can either be done manually + or a custom telemetry channel can be added using + :meth:`register_new_channel`. - .. note :: Do not resample data multiple times. Always resample based on the original data - to preserve accuracy + .. note :: Do not resample data multiple times. Always resample based + on the original data to preserve accuracy Args: other: Object to be merged with self @@ -414,20 +438,25 @@ def merge_channels( data = self.set_index('Date') other = other.set_index('Date') - # save dtypes before merging so they can be restored after merging - # necessary for example because merging produces NaN values which would cause an int column to become float - # but it can be converted back to int after interpolating missing values + # save dtypes before merging, so they can be restored after merging + # necessary for example because merging produces NaN values which + # would cause an int column to become float, but it can be converted + # back to int after interpolating missing values dtype_map = dict() for df in data, other: for col in df.columns: if col not in dtype_map.keys(): dtype_map[col] = df[col].dtype - # Exclude columns existing on both dataframes from one dataframe before merging (cannot merge with duplicates) + # Exclude columns existing on both dataframes from one dataframe + # before merging (cannot merge with duplicates) on_both_columns = set(other.columns).intersection(set(data.columns)) - merged = other.merge(data[data.columns.difference(on_both_columns, sort=False)], - how='outer', left_index=True, right_index=True, sort=True) - # now use the previously excluded columns to update the missing values in the merged dataframe + merged = other.merge( + data[data.columns.difference(on_both_columns, sort=False)], + how='outer', left_index=True, right_index=True, sort=True + ) + # now use the previously excluded columns to update the missing values + 
# in the merged dataframe for col in on_both_columns: merged[col].update(data[col]) @@ -443,13 +472,15 @@ def merge_channels( ref_date = merged.index[i] - # data needs to be resampled/interpolated differently, depending on what kind of data it is - # how to handle which column is defined in self._CHANNELS + # data needs to be resampled/interpolated differently, depending on + # what kind of data it is how to handle which column is defined in + # self._CHANNELS if frequency == 'original': # no resampling but still interpolation due to merging merged = merged.fill_missing() - merged = merged.reset_index().rename(columns={'index': 'Date'}) # make 'Date' a column again + # make 'Date' a column again + merged = merged.reset_index().rename(columns={'index': 'Date'}) else: frq = f'{1 / frequency}S' @@ -464,21 +495,28 @@ def merge_channels( if sig_type == 'continuous': missing = self._CHANNELS[ch]['missing'] res = merged.loc[:, ch] \ - .resample(frq, origin=ref_date).mean().interpolate(method=missing, fill_value='extrapolate') + .resample(frq, origin=ref_date).mean() \ + .interpolate(method=missing, fill_value='extrapolate') elif sig_type == 'discrete': - res = merged.loc[:, ch].resample(frq, origin=ref_date).ffill().ffill().bfill() - # first ffill is a method of the resampler object and will ONLY ffill values created during - # resampling but not already existing NaN values. NaN values already existed because of merging, - # therefore call ffill a second time as a method of the returned series to fill these too - # only use bfill after ffill to fix first row + res = merged.loc[:, ch].resample(frq, origin=ref_date) \ + .ffill().ffill().bfill() + # first ffill is a method of the resampler object and will + # ONLY ffill values created during resampling but not + # already existing NaN values. 
NaN values already existed + # because of merging, therefore call ffill a second time as + # a method of the returned series to fill these too only + # use bfill after ffill to fix first row else: continue resampled_columns[ch] = res - res_source = merged.loc[:, 'Source'].resample(frq, origin=ref_date).asfreq().fillna(value='interpolation') + res_source = merged.loc[:, 'Source'] \ + .resample(frq, origin=ref_date) \ + .asfreq() \ + .fillna(value='interpolation') resampled_columns['Source'] = res_source # join resampled columns and make 'Date' a column again @@ -488,8 +526,10 @@ def merge_channels( .rename(columns={'index': 'Date'}) # recalculate the time columns - merged['SessionTime'] = merged['Date'] - self.session.t0_date - merged['Time'] = merged['SessionTime'] - merged['SessionTime'].iloc[0] + merged['SessionTime'] \ + = merged['Date'] - self.session.t0_date + merged['Time'] \ + = merged['SessionTime'] - merged['SessionTime'].iloc[0] # restore data types from before merging for col in dtype_map.keys(): @@ -509,18 +549,22 @@ def resample_channels( ): """Resample telemetry data. - Convenience method for frequency conversion and resampling. Up and down sampling of data is supported. - 'Date' and 'SessionTime' need to exist in the data. 'Date' is used as the main time reference. + Convenience method for frequency conversion and resampling. Up and + down sampling of data is supported. 'Date' and 'SessionTime' need to + exist in the data. 'Date' is used as the main time reference. There are two ways to use this method: - - Usage like :meth:`pandas.DataFrame.resample`: In this case you need to specify the 'rule' for resampling - and any additional keywords will be passed on to :meth:`pandas.Series.resample` to create a new time - reference. See the pandas method to see which options are available. 
+ - Usage like :meth:`pandas.DataFrame.resample`: In this case you + need to specify the 'rule' for resampling and any additional + keywords will be passed on to :meth:`pandas.Series.resample` to + create a new time reference. See the pandas method to see which + options are available. - - using the 'new_date_ref' keyword a :class:`pandas.Series` containing new values for date - (dtype :class:`pandas.Timestamp`) can be provided. The existing data will be resampled onto this new - time reference. + - using the 'new_date_ref' keyword a :class:`pandas.Series` + containing new values for date (dtype :class:`pandas.Timestamp`) + can be provided. The existing data will be resampled onto this + new time reference. Args: rule: Resampling rule for :meth:`pandas.Series.resample` @@ -529,9 +573,11 @@ def resample_channels( for :meth:`pandas.Series.resample` """ if rule is not None and new_date_ref is not None: - raise ValueError("You can only specify one of 'rule' or 'new_index'") + raise ValueError("You can only specify one of 'rule' or " + "'new_index'") if rule is None and new_date_ref is None: - raise ValueError("You need to specify either 'rule' or 'new_index'") + raise ValueError("You need to specify either 'rule' or " + "'new_index'") if new_date_ref is None: st = pd.Series(index=pd.DatetimeIndex(self['Date']), dtype=int) \ @@ -553,8 +599,9 @@ def resample_channels( def fill_missing(self): """Calculate missing values in self. - Only known telemetry channels will be interpolated. Unknown channels are skipped and returned unmodified. - Interpolation will be done according to the default mapping and according to options specified for + Only known telemetry channels will be interpolated. Unknown channels + are skipped and returned unmodified. Interpolation will be done + according to the default mapping and according to options specified for registered custom channels. 
For example: | Linear interpolation will be used for continuous values (Speed, RPM) | Forward-fill will be used for discrete values (Gear, DRS, ...) @@ -567,28 +614,33 @@ def fill_missing(self): if ch not in self.columns: continue sig_type = self._CHANNELS[ch]['type'] - if sig_type == 'continuous': # yes, this is necessary to prevent pandas from crashing + if sig_type == 'continuous': if ret[ch].dtype == 'object': warnings.warn("Interpolation not possible for telemetry " "channel because dtype is 'object'") missing = self._CHANNELS[ch]['missing'] ret.loc[:, ch] = ret.loc[:, ch] \ - .interpolate(method=missing, limit_direction='both', fill_value='extrapolate') + .interpolate(method=missing, limit_direction='both', + fill_value='extrapolate') elif sig_type == 'discrete': ret.loc[:, ch] = ret.loc[:, ch].ffill().ffill().bfill() - # first ffill is a method of the resampler object and will ONLY ffill values created during - # resampling but not already existing NaN values. NaN values already existed because of merging, - # therefore call ffill a second time as a method of the returned series to fill these too - # only use bfill after ffill to fix first row + # first ffill is a method of the resampler object and will + # ONLY ffill values created during resampling but not already + # existing NaN values. 
NaN values already existed because of + # merging, therefore call ffill a second time as a method of + # the returned series to fill these too only use bfill after + # ffill to fix first row if 'Source' in ret.columns: - ret.loc[:, 'Source'] = ret.loc[:, 'Source'].fillna(value='interpolation') + ret.loc[:, 'Source'] = ret.loc[:, 'Source'] \ + .fillna(value='interpolation') if 'Date' in self.columns: ret['SessionTime'] = ret['Date'] - self.session.t0_date elif isinstance(ret.index, pd.DatetimeIndex): - ret['SessionTime'] = ret.index - self.session.t0_date # assume index is Date + # assume index is Date + ret['SessionTime'] = ret.index - self.session.t0_date ret['Time'] = ret['SessionTime'] - ret['SessionTime'].iloc[0] return ret @@ -602,7 +654,8 @@ def register_new_channel( ): """Register a custom telemetry channel. - Registered telemetry channels are automatically interpolated when merging or resampling data. + Registered telemetry channels are automatically interpolated when + merging or resampling data. Args: name: Telemetry channel/column name @@ -619,14 +672,21 @@ def register_new_channel( if signal_type not in ('discrete', 'continuous', 'excluded'): raise ValueError(f"Unknown signal type {signal_type}.") if signal_type == 'continuous' and interpolation_method is None: - raise ValueError("signal_type='continuous' requires interpolation_method to be specified.") + raise ValueError("signal_type='continuous' requires " + "interpolation_method to be specified.") - cls._CHANNELS[name] = {'type': signal_type, 'missing': interpolation_method} + cls._CHANNELS[name] = {'type': signal_type, + 'missing': interpolation_method} def get_first_non_zero_time_index(self): - """Return the first index at which the 'Time' value is not zero or NA/NaT""" - # find first row where time is not zero; usually this is the first row but sometimes..... 
- i_arr = np.where((self['Time'] != pd.Timedelta(0)) & ~pd.isna(self['Time']))[0] + """ + Return the first index at which the 'Time' value is not zero + or NA/NaT + """ + # find first row where time is not zero; usually this is the first row + # but sometimes..... + i_arr = np.where((self['Time'] != pd.Timedelta(0)) + & ~pd.isna(self['Time']))[0] if i_arr.size != 0: return np.min(i_arr) return None @@ -663,10 +723,12 @@ def add_differential_distance( def add_distance(self, drop_existing: bool = True) -> "Telemetry": """Add column 'Distance' to self. - This column contains the distance driven since the first sample of self in meters. + This column contains the distance driven since the first sample of + self in meters. - The data is produced by integrating the differential distance between subsequent laps. - You should not apply this function to telemetry of many laps simultaneously to reduce integration error. + The data is produced by integrating the differential distance between + subsequent laps. You should not apply this function to telemetry of + many laps simultaneously to reduce integration error. Instead apply it only to single laps or few laps at a time! Calls :meth:`integrate_distance` and joins the result with self. @@ -682,7 +744,8 @@ def add_distance(self, drop_existing: bool = True) -> "Telemetry": new_dist = pd.DataFrame({'Distance': self.integrate_distance()}) if 'Distance' in self.columns: - return self.drop(labels='Distance', axis=1).join(new_dist, how='outer') + return self.drop(labels='Distance', axis=1) \ + .join(new_dist, how='outer') return self.join(new_dist, how='outer') @@ -693,7 +756,8 @@ def add_relative_distance(self, drop_existing: bool = True) -> "Telemetry": a floating point number where ``0.0`` is the first sample of self and ``1.0`` is the last sample. - This is calculated the same way as 'Distance' (see: :meth:`add_distance`). The same warnings apply. + This is calculated the same way as 'Distance' + (see: :meth:`add_distance`). 
The same warnings apply. Args: drop_existing: Drop and recalculate column if it already exists @@ -714,7 +778,8 @@ def add_relative_distance(self, drop_existing: bool = True) -> "Telemetry": else: dist = d.integrate_distance() rel_dist = dist / dist.iloc[-1] - return d.join(pd.DataFrame({'RelativeDistance': rel_dist}), how='outer') + return d.join(pd.DataFrame({'RelativeDistance': rel_dist}), + how='outer') def add_track_status(self, drop_existing=True): """Add column 'TrackStatus' to self. @@ -771,15 +836,18 @@ def add_driver_ahead(self, drop_existing: bool = True) -> "Telemetry": DriverAhead: Driver number of the driver ahead as string DistanceToDriverAhead: Distance to next car ahead in meters - .. note:: Cars in the pit lane are currently not excluded from the data. They will show up when overtaken on - pit straight even if they're not technically in front of the car. A fix for this is TBD with other - improvements. + .. note:: Cars in the pit lane are currently not excluded from the + data. They will show up when overtaken on pit straight even if + they're not technically in front of the car. A fix for this is + TBD with other improvements. - This should only be applied to data of single laps or few laps at a time to reduce integration error. + This should only be applied to data of single laps or few laps at a + time to reduce integration error. For longer time spans it should be applied per lap and the laps should be merged afterwards. - If you absolutely need to apply it to a whole session, use the legacy implementation. Note that data of - the legacy implementation will be considerably less smooth. (see :mod:`fastf1.legacy`) + If you absolutely need to apply it to a whole session, use the legacy + implementation. Note that data of the legacy implementation will be + considerably less smooth. (see :mod:`fastf1.legacy`) Calls :meth:`calculate_driver_ahead` and joins the result with self. 
@@ -789,7 +857,8 @@ def add_driver_ahead(self, drop_existing: bool = True) -> "Telemetry": self joined with new column or self if column exists and `drop_existing` is False. """ - if 'DriverAhead' in self.columns and 'DistanceToDriverAhead' in self.columns: + if (('DriverAhead' in self.columns) + and ('DistanceToDriverAhead' in self.columns)): if drop_existing: d = self.drop(labels='DriverAhead', axis=1) \ .drop(labels='DistanceToDriverAhead', axis=1) @@ -831,7 +900,8 @@ def calculate_differential_distance(self) -> pd.Series: Distance is in meters """ if not all([col in self.columns for col in ('Speed', 'Time')]): - raise ValueError("Telemetry does not contain required channels 'Time' and 'Speed'.") + raise ValueError("Telemetry does not contain required channels " + "'Time' and 'Speed'.") if self.size != 0: dt = self['Time'].dt.total_seconds().diff() dt.iloc[0] = self['Time'].iloc[0].total_seconds() @@ -843,8 +913,10 @@ def calculate_differential_distance(self) -> pd.Series: def integrate_distance(self): """Return the distance driven since the first sample of self. - Distance is in meters. The data is produce by integration. Integration error will stack up when used for - long slices of data. This should therefore only be used for data of single laps or few laps at a time. + Distance is in meters. The data is produce by integration. + Integration error will stack up when used for long slices of data. + This should therefore only be used for data of single laps or few + laps at a time. Returns: :class:`pd.Series` @@ -861,9 +933,11 @@ def calculate_driver_ahead(self, return_reference: bool = False): Driver ahead: Driver number of the driver ahead as string Distance to driver ahead: Distance to the car ahead in meters - .. note:: This gives a smoother/cleaner result than the legacy implementation but WILL introduce - integration error when used over long distances (more than one or two laps may sometimes be considered - a long distance). 
If in doubt, do sanity checks (against the legacy version or in another way). + .. note:: This gives a smoother/cleaner result than the legacy + implementation but WILL introduce integration error when used + over long distances (more than one or two laps may sometimes be + considered a long distance). If in doubt, do sanity checks + (against the legacy version or in another way). Args: return_reference: Additionally return the reference @@ -880,38 +954,48 @@ def calculate_driver_ahead(self, return_reference: bool = False): # Assume the following lap profile as a catch all for all drivers # - # |------ Lap before ------|------ n Laps between ------|------ Lap after ------| - # ^ ^ - # t_start t_end - # Integration of the distance needs to start at the finish line so that there exists a common zero point - # Therefore find the "lap before" which is the lap during which the telemetry slice starts and the "lap after" - # where the telemetry slice ends - # Integrate distance over all relevant laps and slice by t_start and t_end after to get the interesting - # part only - own_laps = self.session.laps[self.session.laps['DriverNumber'] == self.driver] - first_lap_number = (own_laps[own_laps['LapStartTime'] <= t_start])['LapNumber'].iloc[-1] - + # |---- Lap before ----|---- n Laps between ----|---- Lap after ----| + # ^ ^ + # t_start t_end + # Integration of the distance needs to start at the finish line so + # that there exists a common zero point. Therefore find the "lap + # before" which is the lap during which the telemetry slice starts and + # the "lap after" where the telemetry slice ends. + # Integrate distance over all relevant laps and slice by t_start and + # t_end after to get the interesting part only. 
+        own_laps = self.session.laps[
+            self.session.laps['DriverNumber'] == self.driver
+        ]
+        first_lap_number = ((own_laps[own_laps['LapStartTime'] <= t_start])
+                            ['LapNumber'].iloc[-1])
         own_ref_tel = None
 
         for drv in self.session.drivers:
             if drv not in self.session.car_data:
                 continue
-            # find correct first relevant lap; very important for correct zero point in distance
-            drv_laps = self.session.laps[self.session.laps['DriverNumber'] == drv]
-            if drv_laps.empty:  # Only include drivers who participated in this session
+            # find correct first relevant lap; very important for correct zero
+            # point in distance
+            drv_laps = self.session.laps[
+                self.session.laps['DriverNumber'] == drv
+            ]
+            if drv_laps.empty:
+                # Only include drivers who participated in this session
                 continue
 
             drv_laps_before = drv_laps[(drv_laps['LapStartTime'] <= t_start)]
             if not drv_laps_before.empty:
                 lap_n_before = drv_laps_before['LapNumber'].iloc[-1]
                 if lap_n_before < first_lap_number:
-                    # driver is behind on track an therefore will cross the finish line AFTER self
-                    # therefore above check for LapStartTime <= t_start is wrong
-                    # the first relevant lap is the first lap with LapStartTime > t_start which is lap_n_before += 1
+                    # driver is behind on track and therefore will cross the
+                    # finish line AFTER self; therefore above check for
+                    # LapStartTime <= t_start is wrong; the first relevant lap
+                    # is the first lap with LapStartTime > t_start which is
+                    # lap_n_before += 1
                     lap_n_before += 1
             else:
                 lap_n_before = min(drv_laps['LapNumber'])
 
-            # find last relevant lap so as to no do too much unnecessary calculation later
+            # find last relevant lap so as to not do too much unnecessary
+            # calculation later
             drv_laps_after = drv_laps[drv_laps['Time'] >= t_end]
             lap_n_after = drv_laps_after['LapNumber'].iloc[0] \
                 if not drv_laps_after.empty \
calculate distance, so that distance is zero at finish line + # first slice by lap and calculate distance, so that distance is + # zero at finish line drv_tel = self.session.car_data[drv] \ .slice_by_lap(relevant_laps) @@ -978,28 +1063,44 @@ def calculate_driver_ahead(self, return_reference: bool = False): combined_distance = combined_distance.join(drv_tel, how='outer') # create driver map for array - drv_map = combined_distance.loc[:, combined_distance.columns != self.driver].columns.to_numpy() + drv_map = combined_distance \ + .loc[:, combined_distance.columns != self.driver] \ + .columns.to_numpy() own_dst = combined_distance.loc[:, self.driver].to_numpy() - other_dst = combined_distance.loc[:, combined_distance.columns != self.driver].to_numpy() + other_dst = combined_distance \ + .loc[:, combined_distance.columns != self.driver] \ + .to_numpy() # replace distance with nan if it does not change - # prepend first row before diff so that array size stays the same; but missing first sample because of that - other_dst[np.diff(other_dst, n=1, axis=0, prepend=other_dst[0, :].reshape((1, -1))) == 0] = np.nan + # prepend first row before diff so that array size stays the same; + # but missing first sample because of that + other_dst[ + np.diff(other_dst, n=1, axis=0, prepend=other_dst[0, :] + .reshape((1, -1))) == 0 + ] = np.nan # resize own_dst to match shape of other_dst for easy subtraction - own_dst = np.repeat(own_dst.reshape((-1, 1)), other_dst.shape[1], axis=1) + own_dst = np.repeat( + own_dst.reshape((-1, 1)), other_dst.shape[1], axis=1 + ) delta_dst = other_dst - own_dst - delta_dst[np.isnan(delta_dst)] = np.inf # substitute nan with inf, else nan is returned as min - delta_dst[delta_dst < 0] = np.inf # remove cars behind so that neg numbers are not returned as min + # substitute nan with inf, else nan is returned as min + delta_dst[np.isnan(delta_dst)] = np.inf + # remove cars behind so that neg numbers are not returned as min + delta_dst[delta_dst < 0] = 
np.inf index_ahead = np.argmin(delta_dst, axis=1) drv_ahead = np.array([drv_map[i] for i in index_ahead]) - drv_ahead[np.all(delta_dst == np.inf, axis=1)] = '' # remove driver from all inf rows + # remove driver from all inf rows + drv_ahead[np.all(delta_dst == np.inf, axis=1)] = '' - dist_to_drv_ahead = np.array([delta_dst[i, index_ahead[i]] for i in range(len(index_ahead))]) - dist_to_drv_ahead[np.all(delta_dst == np.inf, axis=1)] = np.nan # remove value from all inf rows + dist_to_drv_ahead = np.array( + [delta_dst[i, index_ahead[i]] for i in range(len(index_ahead))] + ) + # remove value from all inf rows + dist_to_drv_ahead[np.all(delta_dst == np.inf, axis=1)] = np.nan if return_reference: return drv_ahead, dist_to_drv_ahead, own_ref_tel @@ -1022,10 +1123,11 @@ def __init__(self, event, session_name, f1_api_support=False): """:class:`~fastf1.events.Event`: Reference to the associated event object.""" self.name = session_name - """str: Name of this session, for example 'Qualifying', 'Race', 'FP1', ...""" + """str: Name of this session, for example 'Qualifying', 'Race', + 'FP1', ...""" self.f1_api_support = f1_api_support - """bool: The official F1 API supports this event and lap timing data and - telemetry data are available.""" + """bool: The official F1 API supports this event and lap timing + data and telemetry data are available.""" self.date = self.event.get_session_date(session_name, utc=True) """pandas.Datetime: Date at which this session took place.""" self.api_path = api.make_path( @@ -1064,8 +1166,10 @@ def __repr__(self): def _get_property_warn_not_loaded(self, name): if not hasattr(self, name): - raise DataNotLoadedError("The data you are trying to access has not " - "been loaded yet. See `Session.load`") + raise DataNotLoadedError( + "The data you are trying to access has not been loaded yet. 
" + "See `Session.load`" + ) return getattr(self, name, None) @property @@ -1191,8 +1295,9 @@ def t0_date(self): """ return self._get_property_warn_not_loaded('_t0_date') - def load(self, *, laps=True, telemetry=True, weather=True, messages=True, - livedata=None): + def load(self, *, laps: bool = True, telemetry: bool = True, + weather: bool = True, messages: bool = True, + livedata: LiveTimingData = None): """Load session data from the supported APIs. This method allows to flexibly load some or all data that FastF1 can @@ -1231,13 +1336,12 @@ def load(self, *, laps=True, telemetry=True, weather=True, messages=True, data of different laps. Args: - laps (bool): Load laps and session status data. - telemetry (bool): Load telemetry data. - weather (bool): Load weather data. - messages (bool): Load race control messages for the session - livedata (:class:`fastf1.livetiming.data.LiveTimingData`, optional): - instead of requesting the data from the api, locally saved - livetiming data can be used as a data source + laps: Load laps and session status data. + telemetry: Load telemetry data. + weather: Load weather data. 
+ messages: Load race control messages for the session + livedata: instead of requesting the data from the api, locally + saved livetiming data can be used as a data source """ _logger.info(f"Loading data for " f"{self.event['EventName']} - {self.name}" @@ -1417,8 +1521,9 @@ def _load_laps_data(self, livedata=None): laps_start_time, dtype='timedelta64[ns]' ) - # set missing lap start times to pit out time where possible - mask = pd.isna(result['LapStartTime']) & (~pd.isna(result['PitOutTime'])) + # set missing lap start times to pit out time, where possible + mask = (pd.isna(result['LapStartTime']) + & (~pd.isna(result['PitOutTime']))) result.loc[mask, 'LapStartTime'] = result.loc[mask, 'PitOutTime'] # remove first lap pitout time if it is before session_start_time @@ -1864,38 +1969,49 @@ def __fix_tyre_info(self, df): "Failed to perform lap accuracy check!", _logger) def _check_lap_accuracy(self): - """Accuracy validation; simples yes/no validation - Currently only relies on provided information which can't catch all problems""" + """ + Accuracy validation; simples yes/no validation. 
Currently only relies + on provided information which can't catch all problems + """ # TODO: check for outliers in lap start position for drv in self.drivers: is_accurate = list() prev_lap = None integrity_errors = 0 - for _, lap in self.laps[self.laps['DriverNumber'] == drv].iterrows(): + for _, lap in self.laps[self.laps['DriverNumber'] == drv] \ + .iterrows(): lap_integrity_ok = True - # require existence, non-existence and specific values for some variables + # require existence, non-existence and specific values for + # some variables check_1 = (pd.isnull(lap['PitInTime']) & pd.isnull(lap['PitOutTime']) & (not lap['FastF1Generated']) - & (lap['TrackStatus'] in ('1', '2')) # slightly paranoid, allow only green and yellow flag + # slightly paranoid, allow only green + yellow flag + & (lap['TrackStatus'] in ('1', '2')) & (not pd.isnull(lap['LapTime'])) & (not pd.isnull(lap['Sector1Time'])) & (not pd.isnull(lap['Sector2Time'])) & (not pd.isnull(lap['Sector3Time']))) - if check_1: # only do check 2 if all necessary values for this check are even available - # sum of sector times should be almost equal to lap time (tolerance 3ms) - check_2 = np.allclose(np.sum((lap['Sector1Time'], lap['Sector2Time'], - lap['Sector3Time'])).total_seconds(), - lap['LapTime'].total_seconds(), - atol=0.003, rtol=0, equal_nan=False) + if check_1: + # only do check 2 if all necessary values for this check + # are even available; + # sum of sector times should be almost equal to lap time + # (tolerance 3ms) + check_2 = np.allclose( + np.sum((lap['Sector1Time'], lap['Sector2Time'], + lap['Sector3Time'])).total_seconds(), + lap['LapTime'].total_seconds(), + atol=0.003, rtol=0, equal_nan=False + ) if not check_2: lap_integrity_ok = False else: check_2 = False # data not available means fail if prev_lap is not None: - # first lap after safety car often has timing issues (as do all laps under safety car) + # first lap after safety car often has timing issues + # (as do all laps under safety 
car)
                     check_3 = (prev_lap['TrackStatus'] != '4')
                 else:
                     check_3 = True  # no previous lap, no SC error
@@ -2135,7 +2251,7 @@ def _load_race_control_messages(self, livedata=None):
 
     @soft_exceptions("telemetry data", "Failed to load telemetry data!",
                      _logger)
-    def _load_telemetry(self, livedata=None):
+    def _load_telemetry(self, livedata: LiveTimingData = None):
         """Load telemetry data from the API.
 
         This method can only be called after :meth:`load_laps` has been
@@ -2143,22 +2259,23 @@ def _load_telemetry(self, livedata=None):
         the optional ``with_telemetry=True`` argument instead of calling this
         method separately. The result will be the same.
 
-        The raw data is divided into car data (Speed, RPM, ...) and position data (coordinates, on/off track). For each
-        of the two types an instance of :class:`Telemetry` is created per driver. The properties
-        :attr:`Session.car_data` and :attr:`Session.pos_data` are dictionaries which hold the the `Telemetry` objects
-        keyed by driver number.
+        The raw data is divided into car data (Speed, RPM, ...) and position
+        data (coordinates, on/off track). For each of the two types an
+        instance of :class:`Telemetry` is created per driver. The properties
+        :attr:`Session.car_data` and :attr:`Session.pos_data` are dictionaries
+        which hold the `Telemetry` objects keyed by driver number.
 
-        The telemetry data can either be accessed through the above mentioned attributes or conveniently on a per
-        lap basis through :class:`Lap` and :class:`Laps`. See :class:`Telemetry` on how to work with the telemetry
-        data.
+        The telemetry data can either be accessed through the above mentioned
+        attributes or conveniently on a per lap basis through :class:`Lap`
+        and :class:`Laps`. See :class:`Telemetry` on how to work with the
+        telemetry data.
 
-        Note that this method additionally calculates :attr:`Session.t0_date` and adds a `LapStartDate` column to
-        :attr:`Session.laps`.
+ Note that this method additionally calculates :attr:`Session.t0_date` + and adds a `LapStartDate` column to :attr:`Session.laps`. Args: - livedata (:class:`fastf1.livetiming.data.LiveTimingData`, optional) : - instead of requesting the data from the api, locally saved - livetiming data can be used as a data source + livedata: instead of requesting the data from the api, locally + saved livetiming data can be used as a data source """ try: car_data = api.car_data(self.api_path, livedata=livedata) @@ -2206,7 +2323,8 @@ def _load_telemetry(self, livedata=None): def get_driver(self, identifier) -> "DriverResult": """ - Get a driver object which contains additional information about a driver. + Get a driver object which contains additional information about a + driver. Args: identifier (str): driver's three letter identifier (for @@ -2241,15 +2359,18 @@ def get_circuit_info(self) -> Optional[CircuitInfo]: return circuit_info def _calculate_t0_date(self, *tel_data_sets: dict): - """Calculate the date timestamp at which data for this session is starting. + """ + Calculate the date timestamp at which data for this session is + starting. - This does not mark the start of a race (or other sessions). This marks the start of the data which is sometimes - far before. + This does not mark the start of a race (or other sessions). This marks + the start of the data which is sometimes far before. - This function sets :attr:`self.t0_date` which is an internally required offset for some calculations. + This function sets :attr:`self.t0_date` which is an internally + required offset for some calculations. - The current assumption is that the latest date which can be calculated is correct. (Based on the timestamp with - the least delay.) + The current assumption is that the latest date which can be calculated + is correct. (Based on the timestamp with the least delay.) 
Args: tel_data_sets: Dictionaries containing car telemetry data or @@ -2496,14 +2617,16 @@ def base_class_view(self): def telemetry(self) -> Telemetry: """Telemetry data for all laps in `self` - This is a cached (!) property for :meth:`get_telemetry`. It will return the same value as `get_telemetry` - but cache the result so that the involved processing is only done once. + This is a cached (!) property for :meth:`get_telemetry`. It will + return the same value as `get_telemetry` but cache the result so that + the involved processing is only done once. This is mainly provided for convenience and backwards compatibility. See :meth:`get_telemetry` for more information. - .. note:: Telemetry can only be returned if `self` contains laps of one driver only. + .. note:: Telemetry can only be returned if `self` contains laps of + one driver only. Returns: instance of :class:`Telemetry`""" @@ -2557,50 +2680,68 @@ def get_telemetry(self, return merged.slice_by_lap(self, interpolate_edges=True) def get_car_data(self, **kwargs) -> Telemetry: - """Car data for all laps in `self` + """ + Car data for all laps in `self` - Slices the car data in :attr:`Session.car_data` using this set of laps and returns the result. + Slices the car data in :attr:`Session.car_data` using this set of laps + and returns the result. - The data returned by this method does not contain computed telemetry channels. The can be added by calling the - appropriate `add_*()` method on the returned telemetry object.. + The data returned by this method does not contain computed telemetry + channels. The can be added by calling the appropriate `add_*()` method + on the returned telemetry object.. - .. note:: Car data can only be returned if `self` contains laps of one driver only. + .. note:: Car data can only be returned if `self` contains laps of + one driver only. 
Args: - **kwargs: Keyword arguments are passed to :meth:`Telemetry.slice_by_lap` + **kwargs: Keyword arguments are passed to + :meth:`Telemetry.slice_by_lap` Returns: instance of :class:`Telemetry` """ drv_num = self['DriverNumber'].unique() if len(drv_num) == 0: - raise ValueError("Cannot slice telemetry because self contains no driver number!") + raise ValueError("Cannot slice telemetry because self contains " + "no driver number!") if len(drv_num) > 1: - raise ValueError("Cannot slice telemetry because self contains Laps of multiple drivers!") + raise ValueError("Cannot slice telemetry because self contains " + "Laps of multiple drivers!") drv_num = drv_num[0] - car_data = self.session.car_data[drv_num].slice_by_lap(self, **kwargs).reset_index(drop=True) + car_data = self.session.car_data[drv_num] \ + .slice_by_lap(self, **kwargs) \ + .reset_index(drop=True) + return car_data def get_pos_data(self, **kwargs) -> Telemetry: - """Pos data for all laps in `self` + """ + Pos data for all laps in `self` - Slices the position data in :attr:`Session.pos_data` using this set of laps and returns the result. + Slices the position data in :attr:`Session.pos_data` using this set + of laps and returns the result. - .. note:: Position data can only be returned if `self` contains laps of one driver only. + .. note:: Position data can only be returned if `self` contains laps + of one driver only. 
Args: - **kwargs: Keyword arguments are passed to :meth:`Telemetry.slice_by_lap` + **kwargs: Keyword arguments are passed to + :meth:`Telemetry.slice_by_lap` Returns: instance of :class:`Telemetry` """ drv_num = self['DriverNumber'].unique() if len(drv_num) == 0: - raise ValueError("Cannot slice telemetry because self contains no driver number!") + raise ValueError("Cannot slice telemetry because self contains " + "no driver number!") if len(drv_num) > 1: - raise ValueError("Cannot slice telemetry because self contains Laps of multiple drivers!") + raise ValueError("Cannot slice telemetry because self contains " + "Laps of multiple drivers!") drv_num = drv_num[0] - pos_data = self.session.pos_data[drv_num].slice_by_lap(self, **kwargs).reset_index(drop=True) + pos_data = self.session.pos_data[drv_num] \ + .slice_by_lap(self, **kwargs) \ + .reset_index(drop=True) return pos_data def get_weather_data(self) -> pd.DataFrame: @@ -2675,7 +2816,7 @@ def get_weather_data(self) -> pd.DataFrame: 274 0 days 00:36:47.787000 VER ... 339 1.1 [275 rows x 38 columns] - """ + """ # noqa: E501 (due to long examples and doctest output) wd = [lap.get_weather_data() for _, lap in self.iterrows()] if wd: return pd.concat(wd, axis=1).T @@ -2789,7 +2930,8 @@ def pick_team(self, name: str) -> "Laps": mercedes = session_laps.pick_team('Mercedes') alfa_romeo = session_laps.pick_team('Alfa Romeo') - Have a look to :attr:`fastf1.plotting.TEAM_COLORS` for a quick reference on team names. + Have a look to :attr:`fastf1.plotting.TEAM_COLORS` for a quick + reference on team names. Args: name (str): Team name @@ -2967,7 +3109,8 @@ def pick_wo_box(self) -> "Laps": Returns: instance of :class:`Laps` """ - return self[pd.isnull(self['PitInTime']) & pd.isnull(self['PitOutTime'])] + return self[pd.isnull(self['PitInTime']) + & pd.isnull(self['PitOutTime'])] def pick_box_laps(self, which: str = 'both') -> "Laps": """Return all laps which are either in-laps, out-laps, or both. 
@@ -3105,9 +3248,11 @@ def iterlaps(self, require: Optional[Iterable] = None) \ class Lap(pd.Series): - """Object for accessing lap (timing) data of a single lap. + """ + Object for accessing lap (timing) data of a single lap. - This class wraps :class:`pandas.Series`. It provides extra functionality for accessing a lap's associated + This class wraps :class:`pandas.Series`. It provides extra functionality + for accessing a lap's associated telemetry data. """ _metadata = ['session'] @@ -3128,8 +3273,9 @@ def _new(*args, **kwargs): def telemetry(self) -> Telemetry: """Telemetry data for this lap - This is a cached (!) property for :meth:`get_telemetry`. It will return the same value as `get_telemetry` - but cache the result so that the involved processing is only done once. + This is a cached (!) property for :meth:`get_telemetry`. It will + return the same value as `get_telemetry` but cache the result so + that the involved processing is only done once. This is mainly provided for convenience and backwards compatibility. @@ -3187,32 +3333,42 @@ def get_telemetry(self, def get_car_data(self, **kwargs) -> Telemetry: """Car data for this lap - Slices the car data in :attr:`Session.car_data` using this lap and returns the result. + Slices the car data in :attr:`Session.car_data` using this lap and + returns the result. - The data returned by this method does not contain computed telemetry channels. The can be added by calling the - appropriate `add_*()` method on the returned telemetry object. + The data returned by this method does not contain computed telemetry + channels. The can be added by calling the appropriate `add_*()` + method on the returned telemetry object. 
Args: - **kwargs: Keyword arguments are passed to :meth:`Telemetry.slice_by_lap` + **kwargs: Keyword arguments are passed to + :meth:`Telemetry.slice_by_lap` Returns: instance of :class:`Telemetry` """ - car_data = self.session.car_data[self['DriverNumber']].slice_by_lap(self, **kwargs).reset_index(drop=True) + car_data = self.session.car_data[self['DriverNumber']] \ + .slice_by_lap(self, **kwargs) \ + .reset_index(drop=True) return car_data def get_pos_data(self, **kwargs) -> Telemetry: """Pos data for all laps in `self` - Slices the position data in :attr:`Session.pos_data` using this lap and returns the result. + Slices the position data in :attr:`Session.pos_data` using this lap + and returns the result. Args: - **kwargs: Keyword arguments are passed to :meth:`Telemetry.slice_by_lap` + **kwargs: Keyword arguments are passed to + :meth:`Telemetry.slice_by_lap` Returns: instance of :class:`Telemetry` """ - pos_data = self.session.pos_data[self['DriverNumber']].slice_by_lap(self, **kwargs).reset_index(drop=True) + pos_data = self.session.pos_data[self['DriverNumber']] \ + .slice_by_lap(self, **kwargs) \ + .reset_index(drop=True) + return pos_data def get_weather_data(self) -> pd.Series: @@ -3291,8 +3447,8 @@ class SessionResults(pd.DataFrame): drivers permanent number) - ``BroadcastName`` | :class:`str` | - First letter of the drivers first name plus the drivers full last name - in all capital letters. (e.g. 'P GASLY') + First letter of the drivers first name plus the drivers full last + name in all capital letters. (e.g. 'P GASLY') - ``FullName`` | :class:`str` | The drivers full name (e.g. "Pierre Gasly") @@ -3505,10 +3661,14 @@ class DataNotLoadedError(Exception): class NoLapDataError(Exception): - """Raised if the API request does not fail but there is no usable data after processing the result.""" - + """ + Raised if the API request does not fail but there is no usable data + after processing the result. 
+ """ def __init__(self, *args): - super(NoLapDataError, self).__init__("Failed to load session because the API did not provide any usable data.") + super(NoLapDataError, self).__init__("Failed to load session because " + "the API did not provide any " + "usable data.") class InvalidSessionError(Exception): diff --git a/fastf1/ergast/structure.py b/fastf1/ergast/structure.py index ee187ecd6..9dbf455cb 100644 --- a/fastf1/ergast/structure.py +++ b/fastf1/ergast/structure.py @@ -94,8 +94,10 @@ def timedelta_from_ergast(t_str) -> Optional[datetime.timedelta]: def save_int(i_str) -> int: - """Create an ``int`` object from a string that is formatted like an integer. - In cases where the input string is not a valid integer, return -1. See #432 + """ + Create an ``int`` object from a string that is formatted like an + integer. In cases where the input string is not a valid integer, + return -1. See #432 """ # Match pure integer strings, e.g. # - '1234' -> 1234 diff --git a/fastf1/events.py b/fastf1/events.py index 042bb7962..6c214d980 100644 --- a/fastf1/events.py +++ b/fastf1/events.py @@ -491,7 +491,9 @@ def get_events_remaining( backend: Optional[Literal['fastf1', 'f1timing', 'ergast']] = None, force_ergast: bool = False ) -> 'EventSchedule': - """Create an :class:`~fastf1.events.EventSchedule` object for remaining season. + """ + Create an :class:`~fastf1.events.EventSchedule` object for remaining + season. Args: dt: Optional DateTime to get events after. @@ -919,7 +921,8 @@ def get_event_by_name( event for the British Grand Prix. strict_search: Search only for exact query matches instead of using fuzzy search. 
For example, - ``.get_event_by_name("British Grand Prix", strict_search=True)`` # noqa: E501 + ``.get_event_by_name("British Grand Prix", + strict_search=True)`` will return the event for the British Grand Prix, whereas ``.get_event_by_name("British", strict_search=True)`` will return ``None`` diff --git a/fastf1/livetiming/__init__.py b/fastf1/livetiming/__init__.py index f8c33bf46..eb3da130d 100644 --- a/fastf1/livetiming/__init__.py +++ b/fastf1/livetiming/__init__.py @@ -82,16 +82,16 @@ You only need to use ``forece_renew=True`` once after modifying the input data. -- The SignalR Client seems to get disconnected after 2 hours of recording. It looks - like the connection is terminated by the server. You need to manually start a - second recording before the first one disconnects if you want to have no gap in - your recording. +- The SignalR Client seems to get disconnected after 2 hours of recording. It + looks like the connection is terminated by the server. You need to manually + start a second recording before the first one disconnects if you want to + have no gap in your recording. - Use a different output file name for the second (or any subsequent) recording. - You can then load :class:`.data.LiveTimingData` from multiple files. The files need - to be provided in chronological order. The content of the files may overlap. - Data from overlapping recordings is recognized and will not be loaded as a - duplicate. + Use a different output file name for the second (or any subsequent) + recording. You can then load :class:`.data.LiveTimingData` from multiple + files. The files need to be provided in chronological order. The content of + the files may overlap. Data from overlapping recordings is recognized and + will not be loaded as a duplicate. @@ -116,16 +116,20 @@ .. 
code-block:: console - usage: python -m fastf1.livetiming save [-h] [--append] [--debug] [--timeout TIMEOUT] file + usage: python -m fastf1.livetiming save [-h] [--append] [--debug] + [--timeout TIMEOUT] file positional arguments: file Output file name optional arguments: -h, --help show this help message and exit - --append Append to output file. By default the file is overwritten if it exists already. - --debug Enable debug mode: save full SignalR message, not just the data. - --timeout TIMEOUT Timeout in seconds after which the client will automatically exit if no data is received + --append Append to output file. By default the file is + overwritten if it exists already. + --debug Enable debug mode: save full SignalR message, not + just the data. + --timeout TIMEOUT Timeout in seconds after which the client will + automatically exit if no data is received Extract @@ -133,10 +137,11 @@ **Only for when data was saved with the optional '--debug' argument** -Recording in debug mode saves the full SignalR messages as received. The non debug mode saves only the -important data part of a message. The data part of each message needs to be extracted to utilize the debug-mode -data. -The extracted data is the same data you get when saving without the '--debug' argument. +Recording in debug mode saves the full SignalR messages as received. The non +debug mode saves only the important data part of a message. The data part of +each message needs to be extracted to utilize the debug-mode data. +The extracted data is the same data you get when saving without the '--debug' +argument. .. 
code-block:: console diff --git a/fastf1/livetiming/client.py b/fastf1/livetiming/client.py index dfc670174..cac6b6297 100644 --- a/fastf1/livetiming/client.py +++ b/fastf1/livetiming/client.py @@ -152,7 +152,8 @@ async def _run(self): self._connection.error += self._on_debug # Assign debug message handler to save raw responses self._connection.received += self._on_debug - hub.client.on('feed', self._on_do_nothing) # need to connect an async method + hub.client.on('feed', self._on_do_nothing) + # need to connect an async method else: # Assign hub message handler hub.client.on('feed', self._on_message) diff --git a/fastf1/livetiming/data.py b/fastf1/livetiming/data.py index 24ae868eb..bbd181c4d 100644 --- a/fastf1/livetiming/data.py +++ b/fastf1/livetiming/data.py @@ -161,7 +161,8 @@ def _add_to_category(self, cat, entry): def _parse_session_data(self, msg): # make sure the categories exist as we want to append to them if 'TrackStatus' not in self.data.keys(): - self.data['TrackStatus'] = {'Time': [], 'Status': [], 'Message': []} + self.data['TrackStatus'] = {'Time': [], 'Status': [], + 'Message': []} if 'SessionStatus' not in self.data.keys(): self.data['SessionStatus'] = {'Time': [], 'Status': []} diff --git a/fastf1/plotting.py b/fastf1/plotting.py index 5c1852806..d22454ee5 100644 --- a/fastf1/plotting.py +++ b/fastf1/plotting.py @@ -1,8 +1,8 @@ """ Helper functions for creating data plots. -:mod:`fastf1.plotting` provides optional functionality with the intention of making -it easy to create nice plots. +:mod:`fastf1.plotting` provides optional functionality with the intention of +making it easy to create nice plots. 
This module offers mainly two things: - team names and colors @@ -44,9 +44,10 @@ import warnings with warnings.catch_warnings(): - warnings.filterwarnings('ignore', message="Using slow pure-python SequenceMatcher") - # suppress that warning, it's confusing at best here, we don't need fast sequence matching - # and the installation (on windows) some effort + warnings.filterwarnings('ignore', + message="Using slow pure-python SequenceMatcher") + # suppress that warning, it's confusing at best here, we don't need fast + # sequence matching and the installation (on windows) some effort from thefuzz import fuzz @@ -197,12 +198,13 @@ def setup_mpl( you wish to customize the tick formatting for timedelta. color_scheme (str, None): - This enables the Fast-F1 color scheme that you can see in all example - images. + This enables the Fast-F1 color scheme that you can see in all + example images. Valid color scheme names are: ['fastf1', None] misc_mpl_mods (bool): - This enables a collection of patches for the following mpl features: + This enables a collection of patches for the following mpl + features: - ``.savefig`` (saving of figures) - ``.bar``/``.barh`` (plotting of bar graphs) @@ -247,7 +249,8 @@ def driver_color(identifier: str) -> str: '#dc0000' Args: - identifier (str): Abbreviation or uniquely identifying name of the driver. + identifier (str): Abbreviation or uniquely identifying name of the + driver. Returns: str: hex color code @@ -319,7 +322,8 @@ def team_color(identifier: str) -> str: '#00d2be' Args: - identifier (str): Abbreviation or uniquely identifying name of the team. + identifier (str): Abbreviation or uniquely identifying name of the + team. 
Returns: str: hex color code @@ -369,7 +373,8 @@ def lapnumber_axis(ax, axis='xaxis'): the modified axis instance """ - getattr(ax, axis).get_major_locator().set_params(integer=True, min_n_ticks=0) + getattr(ax, axis).get_major_locator().set_params(integer=True, + min_n_ticks=0) return ax diff --git a/fastf1/utils.py b/fastf1/utils.py index 40e194800..55ad749b2 100644 --- a/fastf1/utils.py +++ b/fastf1/utils.py @@ -28,11 +28,11 @@ def delta_time( modified or removed at a future point. .. warning:: This is a nice gimmick but not actually very accurate which - is an inherent problem from the way this is calculated currently (There - may not be a better way though). In comparison with the sector times and the - differences that can be calculated from these, there are notable differences! - You should always verify the result against sector time differences or find a - different way for verification. + is an inherent problem from the way this is calculated currently + (There may not be a better way though). In comparison with the sector + times and the differences that can be calculated from these, there are + notable differences! You should always verify the result against + sector time differences or find a different way for verification. Here is an example that compares the quickest laps of Leclerc and Hamilton from Bahrain 2021 Qualifying: @@ -95,7 +95,9 @@ def mini_pro(stream): # Ensure that all samples are interpolated dstream_start = stream[1] - stream[0] dstream_end = stream[-1] - stream[-2] - return np.concatenate([[stream[0] - dstream_start], stream, [stream[-1] + dstream_end]]) + return np.concatenate( + [[stream[0] - dstream_start], stream, [stream[-1] + dstream_end]] + ) ltime = mini_pro(comp['Time'].dt.total_seconds().to_numpy()) ldistance = mini_pro(comp['Distance'].to_numpy()) @@ -107,8 +109,8 @@ def mini_pro(stream): def recursive_dict_get(d: Dict, *keys: str, default_none: bool = False): - """Recursive dict get. 
Can take an arbitrary number of keys and returns an empty - dict if any key does not exist. + """Recursive dict get. Can take an arbitrary number of keys and returns an + empty dict if any key does not exist. https://stackoverflow.com/a/28225747""" ret = reduce(lambda c, k: c.get(k, {}), keys, d) if default_none and ret == {}: