diff --git a/CHANGELOG.md b/CHANGELOG.md
index dd94a2063..e8f8084d4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -12,6 +12,7 @@ Code freeze date: YYYY-MM-DD
 
 ### Added
 
+- `climada.hazard.tc_tracks.TCTracks.from_FAST` function to read synthetic tracks generated by the FAST model, and Australia basin (AU) added to `BASIN_ENV_PRESSURE` [#993](https://github.com/CLIMADA-project/climada_python/pull/993)
 - Add `osm-flex` package to CLIMADA core [#981](https://github.com/CLIMADA-project/climada_python/pull/981)
 - `doc.tutorial.climada_entity_Exposures_osm.ipynb` tutorial explaining how to use `osm-flex` with CLIMADA
 - `climada.util.coordinates.bounding_box_global` function [#980](https://github.com/CLIMADA-project/climada_python/pull/980)
diff --git a/climada/hazard/tc_tracks.py b/climada/hazard/tc_tracks.py
index fce41053a..ec63d0d90 100644
--- a/climada/hazard/tc_tracks.py
+++ b/climada/hazard/tc_tracks.py
@@ -162,6 +162,7 @@
     "SI": 1005,
     "WP": 1005,
     "SP": 1004,
+    "AU": 1004,
 }
 """Basin-specific default environmental pressure"""
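Review note: the new `"AU"` entry is consumed exactly like the existing basins — the `from_FAST` reader added below looks the storm's basin code up in this mapping to fill the track's `environmental_pressure` variable. A minimal sketch of that lookup (dict values copied from the hunk above; the helper name `default_env_pressure` is hypothetical and not part of the PR):

```python
# Excerpt of the mapping above (values in hPa); "AU" is the new entry.
BASIN_ENV_PRESSURE = {"SI": 1005, "WP": 1005, "SP": 1004, "AU": 1004}

def default_env_pressure(basin_code: str) -> int:
    """Return the basin-specific default environmental pressure in hPa."""
    return BASIN_ENV_PRESSURE[basin_code]

assert default_env_pressure("AU") == 1004  # new Australia basin entry
```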
+ """ + + LOGGER.info("Reading %s files.", len(get_file_names(folder_name))) + data = [] + for file in get_file_names(folder_name): + if Path(file).suffix != ".nc": + continue + with xr.open_dataset(file) as dataset: + for year in dataset.year: + for i in dataset.n_trk: + + # Select track + track = dataset.sel(n_trk=i, year=year) + # chunk dataset at first NaN value + lon = track.lon_trks.data + last_valid_index = np.where(np.isfinite(lon))[0][-1] + track = track.isel(time=slice(0, last_valid_index + 1)) + # Select lat, lon + lat = track.lat_trks.data + lon = track.lon_trks.data + # Convert lon from 0-360 to -180 - 180 + lon = ((lon + 180) % 360) - 180 + # Convert time to pandas Datetime "yyyy.mm.dd" + reference_time = ( + f"{track.tc_years.item()}-{int(track.tc_month.item())}-01" + ) + time = pd.to_datetime( + track.time.data, unit="s", origin=reference_time + ).astype("datetime64[s]") + # Define variables + ms_to_kn = 1.943844 + max_wind_kn = track.vmax_trks.data * ms_to_kn + env_pressure = BASIN_ENV_PRESSURE[track.tc_basins.data.item()] + cen_pres = _estimate_pressure( + np.full(lat.shape, np.nan), + lat, + lon, + max_wind_kn, + ) + + data.append( + xr.Dataset( + { + "time_step": ( + "time", + np.full(time.shape[0], track.time.data[1]), + ), + "max_sustained_wind": ( + "time", + track.vmax_trks.data, + ), + "central_pressure": ("time", cen_pres), + "radius_max_wind": ( + "time", + estimate_rmw( + np.full(lat.shape, np.nan), cen_pres + ), + ), + "environmental_pressure": ( + "time", + np.full(time.shape[0], env_pressure), + ), + "basin": ( + "time", + np.full( + time.shape[0], track.tc_basins.data.item() + ), + ), + }, + coords={ + "time": ("time", time), + "lat": ("time", lat), + "lon": ("time", lon), + }, + attrs={ + "max_sustained_wind_unit": "m/s", + "central_pressure_unit": "hPa", + "name": f"storm_{track.n_trk.item()}", + "sid": track.n_trk.item(), + "orig_event_flag": True, + "data_provider": "FAST", + "id_no": track.n_trk.item(), + "category": set_category( + max_wind_kn, wind_unit="kn", saffir_scale=None + ), + }, + ) + ) + + return cls(data) + def write_hdf5(self, file_name, complevel=5): """Write TC tracks in NetCDF4-compliant HDF5 format. @@ -2665,12 +2778,12 @@ def ibtracs_fit_param(explained, explanatory, year_range=(1980, 2019), order=1): return sm_results -def ibtracs_track_agency(ds_sel): +def ibtracs_track_agency(track): """Get preferred IBTrACS agency for each entry in the dataset. Parameters ---------- - ds_sel : xarray.Dataset + track : xarray.Dataset Subselection of original IBTrACS NetCDF dataset. Returns @@ -2678,7 +2791,7 @@ def ibtracs_track_agency(ds_sel): agency_pref : list of str Names of IBTrACS agencies in order of preference. track_agency_ix : xarray.DataArray of ints - For each entry in `ds_sel`, the agency to use, given as an index into `agency_pref`. + For each entry in `track`, the agency to use, given as an index into `agency_pref`. 
""" agency_pref = ["wmo"] + IBTRACS_AGENCIES agency_map = {a.encode("utf-8"): i for i, a in enumerate(agency_pref)} @@ -2687,11 +2800,11 @@ def ibtracs_track_agency(ds_sel): ) agency_map[b""] = agency_map[b"wmo"] agency_fun = lambda x: agency_map[x] - if "track_agency" not in ds_sel.data_vars.keys(): - ds_sel["track_agency"] = ds_sel["wmo_agency"].where( - ds_sel["wmo_agency"] != b"", ds_sel["usa_agency"] + if "track_agency" not in track.data_vars.keys(): + track["track_agency"] = track["wmo_agency"].where( + track["wmo_agency"] != b"", track["usa_agency"] ) - track_agency_ix = xr.apply_ufunc(agency_fun, ds_sel["track_agency"], vectorize=True) + track_agency_ix = xr.apply_ufunc(agency_fun, track["track_agency"], vectorize=True) return agency_pref, track_agency_ix diff --git a/climada/hazard/test/data/FAST_test_tracks.nc b/climada/hazard/test/data/FAST_test_tracks.nc new file mode 100644 index 000000000..dfe9d8b71 Binary files /dev/null and b/climada/hazard/test/data/FAST_test_tracks.nc differ diff --git a/climada/hazard/test/test_tc_tracks.py b/climada/hazard/test/test_tc_tracks.py index 56005b51a..2828fbfe3 100644 --- a/climada/hazard/test/test_tc_tracks.py +++ b/climada/hazard/test/test_tc_tracks.py @@ -44,6 +44,7 @@ TEST_TRACK_EMANUEL = DATA_DIR.joinpath("emanuel_test_tracks.mat") TEST_TRACK_EMANUEL_CORR = DATA_DIR.joinpath("temp_mpircp85cal_full.mat") TEST_TRACK_CHAZ = DATA_DIR.joinpath("chaz_test_tracks.nc") +TEST_TRACK_FAST = DATA_DIR.joinpath("FAST_test_tracks.nc") TEST_TRACK_STORM = DATA_DIR.joinpath("storm_test_tracks.txt") TEST_TRACKS_ANTIMERIDIAN = DATA_DIR.joinpath("tracks-antimeridian") TEST_TRACKS_LEGACY_HDF5 = DATA_DIR.joinpath("tctracks_hdf5_legacy.nc") @@ -631,6 +632,51 @@ def test_from_simulations_storm(self): tc_track = tc.TCTracks.from_simulations_storm(TEST_TRACK_STORM, years=[7]) self.assertEqual(len(tc_track.data), 0) + def test_from_FAST(self): + """test the correct import of netcdf files from FAST model and the conversion to a + different xr.array structure compatible with CLIMADA.""" + + tc_track = tc.TCTracks.from_FAST(TEST_TRACK_FAST) + + expected_attributes = { + "max_sustained_wind_unit": "m/s", + "central_pressure_unit": "hPa", + "name": "storm_0", + "sid": 0, + "orig_event_flag": True, + "data_provider": "FAST", + "id_no": 0, + "category": 1, + } + + self.assertIsInstance( + tc_track, tc.TCTracks, "tc_track is not an instance of TCTracks" + ) + self.assertIsInstance( + tc_track.data, list, "tc_track.data is not an instance of list" + ) + self.assertIsInstance( + tc_track.data[0], + xr.Dataset, + "tc_track.data[0] not an instance of xarray.Dataset", + ) + self.assertEqual(len(tc_track.data), 5) + self.assertEqual(tc_track.data[0].attrs, expected_attributes) + self.assertEqual(list(tc_track.data[0].coords.keys()), ["time", "lat", "lon"]) + self.assertEqual( + tc_track.data[0].time.values[0], + np.datetime64("2025-09-01T00:00:00.000000000"), + ) + self.assertEqual(tc_track.data[0].lat.values[0], 17.863591350508266) + self.assertEqual(tc_track.data[0].lon.values[0], -71.76441758319629) + self.assertEqual(len(tc_track.data[0].time), 35) + self.assertEqual(tc_track.data[0].time_step[0], 10800) + self.assertEqual( + tc_track.data[0].max_sustained_wind.values[10], 24.71636959089841 + ) + self.assertEqual(tc_track.data[0].environmental_pressure.data[0], 1010) + self.assertEqual(tc_track.data[0].basin[0], "NA") + def test_to_geodataframe_points(self): """Conversion of TCTracks to GeoDataFrame using Points.""" tc_track = 
diff --git a/doc/tutorial/climada_hazard_TropCyclone.ipynb b/doc/tutorial/climada_hazard_TropCyclone.ipynb
index 0613fa7fd..28d80d1ac 100644
--- a/doc/tutorial/climada_hazard_TropCyclone.ipynb
+++ b/doc/tutorial/climada_hazard_TropCyclone.ipynb
@@ -1802,7 +1802,7 @@
     " \n",
     "### d) Load TC tracks from other sources\n",
     "\n",
-    "In addition to the [historical records of TCs (IBTrACS)](#Part1.a), the [probabilistic extension](#Part1.b) of these tracks, and the [ECMWF Forecast tracks](#Part1.c), CLIMADA also features functions to read in synthetic TC tracks from other sources. These include synthetic storm tracks from Kerry Emanuel's coupled statistical-dynamical model (Emanuel et al., 2006 as used in Geiger et al., 2016), synthetic storm tracks from a second coupled statistical-dynamical model (CHAZ) (as described in Lee et al., 2018), and synthetic storm tracks from a fully statistical model (STORM) Bloemendaal et al., 2020). However, these functions are partly under development and/or targeted at advanced users of CLIMADA in the context of very specific use cases. They are thus not covered in this tutorial."
+    "In addition to the [historical records of TCs (IBTrACS)](#Part1.a), the [probabilistic extension](#Part1.b) of these tracks, and the [ECMWF Forecast tracks](#Part1.c), CLIMADA also features functions to read in synthetic TC tracks from other sources. These include synthetic storm tracks from Kerry Emanuel's coupled statistical-dynamical model (Emanuel et al., 2006, as used in Geiger et al., 2016), from [FAST](https://github.com/linjonathan/tropical_cyclone_risk?tab=readme-ov-file), an open-source derivative of Kerry Emanuel's model, from a second coupled statistical-dynamical model (CHAZ, as described in Lee et al., 2018), and from a fully statistical model (STORM, Bloemendaal et al., 2020). However, these functions are partly under development and/or targeted at advanced users of CLIMADA in the context of very specific use cases. They are thus not covered in this tutorial."
    ]
   },
  {
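To complement the tutorial paragraph above, a hedged end-to-end sketch of where the FAST reader fits in the usual hazard pipeline (the folder path and grid bounds are made up for illustration; `equal_timestep`, `Centroids.from_pnt_bounds`, and `TropCyclone.from_tracks` are existing CLIMADA APIs):

```python
from climada.hazard import Centroids, TCTracks, TropCyclone

# Hypothetical folder of FAST NetCDF output files
tracks = TCTracks.from_FAST("data/fast_output/")
tracks.equal_timestep(time_step_h=1.0)  # interpolate tracks to hourly positions

# Illustrative 0.1 degree grid over the Caribbean: (lon_min, lat_min, lon_max, lat_max)
centroids = Centroids.from_pnt_bounds((-85.0, 10.0, -60.0, 25.0), res=0.1)

# Compute the wind fields for every track on the grid
tc_hazard = TropCyclone.from_tracks(tracks, centroids=centroids)
tc_hazard.check()
```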