diff --git a/Core/LAMBDA/layers/main.tf b/Core/LAMBDA/layers/main.tf index 889731a9..c3d32c5b 100644 --- a/Core/LAMBDA/layers/main.tf +++ b/Core/LAMBDA/layers/main.tf @@ -314,5 +314,5 @@ output "requests" { } output "yaml" { - value = resource.aws_lambda_layer_version.yaml + value = resource.aws_lambda_layer_version.dask } diff --git a/Core/LAMBDA/layers/viz_lambda_shared_funcs/python/viz_classes.py b/Core/LAMBDA/layers/viz_lambda_shared_funcs/python/viz_classes.py index 8be7ce5f..aaebc4fd 100644 --- a/Core/LAMBDA/layers/viz_lambda_shared_funcs/python/viz_classes.py +++ b/Core/LAMBDA/layers/viz_lambda_shared_funcs/python/viz_classes.py @@ -51,7 +51,8 @@ def get_db_engine(self): def get_db_connection(self, asynchronous=False): import psycopg2 db_host, db_name, db_user, db_password = self.get_db_credentials() - connection = psycopg2.connect(f"host={db_host} dbname={db_name} user={db_user} password={db_password}", async_=asynchronous) + port = 5439 if self.type == "REDSHIFT" else 5432 + connection = psycopg2.connect(f"host={db_host} dbname={db_name} user={db_user} password={db_password} port={port}", async_=asynchronous) print(f"***> Established db connection to: {db_host} from {inspect.stack()[1].function}()") return connection @@ -387,6 +388,8 @@ def get_s3_prefix(configuration_name, date): prefix = f"max_stage/ahps/{date}/" else: nwm_dataflow_version = os.environ.get("NWM_DATAFLOW_VERSION") if os.environ.get("NWM_DATAFLOW_VERSION") else "prod" + if configuration_name == 'medium_range_ensemble': + configuration_name = 'medium_range_mem6' prefix = f"common/data/model/com/nwm/{nwm_dataflow_version}/nwm.{date}/{configuration_name}/" return prefix diff --git a/Core/LAMBDA/viz_functions/image_based/viz_hand_fim_processing/lambda_function.py b/Core/LAMBDA/viz_functions/image_based/viz_hand_fim_processing/lambda_function.py index e7970cb9..55f6c789 100644 --- a/Core/LAMBDA/viz_functions/image_based/viz_hand_fim_processing/lambda_function.py +++ 
b/Core/LAMBDA/viz_functions/image_based/viz_hand_fim_processing/lambda_function.py @@ -51,6 +51,15 @@ def lambda_handler(event, context): branch = huc8_branch.split("-")[1] s3_path_piece = '' + # Get db table names and setup db connection + print(f"Adding data to {db_fim_table}")# Only process inundation configuration if available data + db_schema = db_fim_table.split(".")[0] + db_table = db_fim_table.split(".")[-1] + if any(x in db_schema for x in ["aep", "fim_catchments", "catfim"]): + process_db = database(db_type="egis") + else: + process_db = database(db_type="viz") + if "catchments" in db_fim_table: df_inundation = create_inundation_catchment_boundary(huc8, branch) else: @@ -76,12 +85,19 @@ def lambda_handler(event, context): rating_curve_exists = s3_file(FIM_BUCKET, rating_curve_key).check_existence() stage_lookup = pd.DataFrame() + df_zero_stage_records = pd.DataFrame() if catch_exists and hand_exists and rating_curve_exists: print("->Calculating flood depth") - stage_lookup = calculate_stage_values(rating_curve_key, data_bucket, subsetted_data, huc8_branch) # get stages + stage_lookup, df_zero_stage_records = calculate_stage_values(rating_curve_key, data_bucket, subsetted_data, huc8_branch) # get stages else: print(f"catchment, hand, or rating curve are missing for huc {huc8} and branch {branch}:\nCatchment exists: {catch_exists} ({catchment_key})\nHand exists: {hand_exists} ({hand_key})\nRating curve exists: {rating_curve_exists} ({rating_curve_key})") - + + # Upload zero_stage reaches for tracking / FIM cache + print(f"Adding zero stage data to {db_table}_zero_stage")# Only process inundation configuration if available data + df_zero_stage_records['branch'] = int(branch) + df_zero_stage_records['huc8'] = int(huc8) + df_zero_stage_records.to_sql(f"{db_table}_zero_stage", con=process_db.engine, schema=db_schema, if_exists='append', index=True) + # If no features with above zero stages are present, then just copy an unflood raster instead of processing 
nothing if stage_lookup.empty: print("No reaches with valid stages") @@ -90,17 +106,27 @@ def lambda_handler(event, context): # Run the desired configuration df_inundation = create_inundation_output(huc8, branch, stage_lookup, reference_time, input_variable) + # Split geometry into seperate table per new schema + df_inundation_geo = df_inundation[['hydro_id', 'feature_id', 'huc8', 'branch', 'rc_stage_ft', 'geom']] + df_inundation.drop(columns=['geom'], inplace=True) + # If records exist in stage_lookup that don't exist in df_inundation, add those to the zero_stage table. + df_no_inundation = stage_lookup.merge(df_inundation.drop_duplicates(), on=['feature_id','hydro_id'],how='left',indicator=True) + df_no_inundation = df_no_inundation.loc[df_no_inundation['_merge'] == 'left_only'] + if df_no_inundation.empty == False: + df_no_inundation.drop(df_no_inundation.columns.difference(['hydro_id','feature_id','huc8','branch','rc_discharge_cms','note']), axis=1, inplace=True) + df_no_inundation['branch'] = int(branch) + df_no_inundation['huc8'] = int(huc8) + df_no_inundation['note'] = "Error - No inundation returned from hand processing." + df_no_inundation.to_sql(f"{db_table}_zero_stage", con=process_db.engine, schema=db_schema, if_exists='append', index=False) + # If no records exist for valid inundation, stop. 
+ if df_inundation.empty: + return + print(f"Adding data to {db_fim_table}")# Only process inundation configuration if available data - db_schema = db_fim_table.split(".")[0] - db_table = db_fim_table.split(".")[-1] - try: - if any(x in db_schema for x in ["aep", "fim_catchments", "catfim"]): - process_db = database(db_type="egis") - else: - process_db = database(db_type="viz") - - df_inundation.to_postgis(db_table, con=process_db.engine, schema=db_schema, if_exists='append') + try: + df_inundation.to_sql(db_table, con=process_db.engine, schema=db_schema, if_exists='append', index=False) + df_inundation_geo.to_postgis(f"{db_table}_geo", con=process_db.engine, schema=db_schema, if_exists='append') except Exception as e: process_db.engine.dispose() raise Exception(f"Failed to add inundation data to DB for {huc8}-{branch} - ({e})") @@ -265,7 +291,9 @@ def create_inundation_output(huc8, branch, stage_lookup, reference_time, input_v catchment_nodata = int(catchment_dataset.nodata) # get no_data value for catchment raster valid_catchments = stage_lookup.index.tolist() # parse lookup to get features with >0 stages # noqa hydroids = stage_lookup.index.tolist() # parse lookup to get all features - stages = stage_lookup['stage_m'].tolist() # parse lookup to get all stages + + # Notable FIM Caching Change: Now using rc_stage_m (the upper value of the current hydrotable interval), instead of the interpolated stage, for inundation extent generation. 
+ stages = stage_lookup['rc_stage_m'].tolist() # parse lookup to get all stages k = np.array(hydroids) # Create a feature numpy array from the list v = np.array(stages) # Create a stage numpy array from the list @@ -412,6 +440,13 @@ def process(window): print("dropping duplicates") df_final = df_final.drop_duplicates() + print("Converting m columns to ft") + df_final['rc_stage_ft'] = (df_final['rc_stage_m'] * 3.28084).astype(int) + df_final['rc_previous_stage_ft'] = round(df_final['rc_previous_stage_m'] * 3.28084, 2) + df_final['rc_discharge_cfs'] = round(df_final['rc_discharge_cms'] * 35.315, 2) + df_final['rc_previous_discharge_cfs'] = round(df_final['rc_previous_discharge_cms'] * 35.315, 2) + df_final = df_final.drop(columns=["rc_stage_m", "rc_previous_stage_m", "rc_discharge_cms", "rc_previous_discharge_cms"]) + print("Adding additional metadata columns") df_final = df_final.reset_index() df_final = df_final.rename(columns={"index": "hydro_id"}) @@ -419,18 +454,18 @@ def process(window): df_final['reference_time'] = reference_time df_final['huc8'] = huc8 df_final['branch'] = branch - df_final['fim_stage_ft'] = round(df_final['stage_m'] * 3.28084, 2) - df_final['hydro_id_str'] = df_final['hydro_id'].astype(str) - df_final['feature_id_str'] = df_final['feature_id'].astype(str) + df_final['forecast_stage_ft'] = round(df_final['stage_m'] * 3.28084, 2) + df_final['prc_method'] = 'HAND_Processing' + #TODO: Check with Shawn on the whole stage configuration / necessarry changes if input_variable == 'stage': drop_columns = ['stage_m', 'huc8_branch', 'huc'] else: df_final['max_rc_stage_ft'] = df_final['max_rc_stage_m'] * 3.28084 df_final['max_rc_stage_ft'] = df_final['max_rc_stage_ft'].astype(int) - df_final['streamflow_cfs'] = round(df_final['streamflow_cms'] * 35.315, 2) + df_final['forecast_discharge_cfs'] = round(df_final['discharge_cms'] * 35.315, 2) df_final['max_rc_discharge_cfs'] = round(df_final['max_rc_discharge_cms'] * 35.315, 2) - drop_columns = ["stage_m", 
"max_rc_stage_m", "streamflow_cms", "max_rc_discharge_cms"] + drop_columns = ["stage_m", "max_rc_stage_m", "discharge_cms", "max_rc_discharge_cms", ] df_final = df_final.drop(columns=drop_columns) @@ -449,50 +484,74 @@ def s3_csv_to_df(bucket, key): def calculate_stage_values(hydrotable_key, subsetted_streams_bucket, subsetted_streams, huc8_branch): """ - Converts streamflow values to stage using the rating curve and linear interpolation because rating curve intervals + Converts discharge (streamflow) values to stage using the rating curve and linear interpolation because rating curve intervals Arguments: local_hydrotable (str): Path to local copy of the branch hydrotable - df_nwm (DataFrame): A pandas dataframe with columns for feature id and desired streamflow column + df_nwm (DataFrame): A pandas dataframe with columns for feature id and desired discharge column Returns: stage_dict (dict): A dictionary with the hydroid as the key and interpolated stage as the value """ df_hydro = s3_csv_to_df(FIM_BUCKET, hydrotable_key) df_hydro = df_hydro[['HydroID', 'feature_id', 'stage', 'discharge_cms', 'LakeID']] - - df_hydro_max = df_hydro.sort_values('stage').groupby('HydroID').tail(1) - df_hydro_max = df_hydro_max.set_index('HydroID') - df_hydro_max = df_hydro_max[['stage', 'discharge_cms']].rename(columns={'stage': 'max_rc_stage_m', 'discharge_cms': 'max_rc_discharge_cms'}) + df_hydro = df_hydro.rename(columns={'HydroID': 'hydro_id', 'stage': 'stage_m'}) + + df_hydro_max = df_hydro.sort_values('stage_m').groupby('hydro_id').tail(1) + df_hydro_max = df_hydro_max.set_index('hydro_id') + df_hydro_max = df_hydro_max[['stage_m', 'discharge_cms']].rename(columns={'stage_m': 'max_rc_stage_m', 'discharge_cms': 'max_rc_discharge_cms'}) df_forecast = s3_csv_to_df(subsetted_streams_bucket, subsetted_streams) df_forecast = df_forecast.loc[df_forecast['huc8_branch']==huc8_branch] - df_forecast['stage_m'] = df_forecast.apply(lambda row : interpolate_stage(row, df_hydro), axis=1) + 
df_forecast = df_forecast.rename(columns={'streamflow_cms': 'discharge_cms'}) #TODO: Change the output CSV to list discharge instead of streamflow for consistency? + df_forecast[['stage_m', 'rc_stage_m', 'rc_previous_stage_m', 'rc_discharge_cms', 'rc_previous_discharge_cms']] = df_forecast.apply(lambda row : interpolate_stage(row, df_hydro), axis=1).apply(pd.Series) + + df_forecast.drop(columns=['huc8_branch', 'huc'], inplace=True) + df_forecast = df_forecast.set_index('hydro_id') print(f"Removing {len(df_forecast[df_forecast['stage_m'].isna()])} reaches with a NaN interpolated stage") + df_zero_stage = df_forecast[df_forecast['stage_m'].isna()].copy() + df_zero_stage['note'] = "NaN Stage After Hydrotable Lookup" df_forecast = df_forecast[~df_forecast['stage_m'].isna()] print(f"Removing {len(df_forecast[df_forecast['stage_m']==0])} reaches with a 0 interpolated stage") + df_zero_stage = pd.concat([df_zero_stage, df_forecast[df_forecast['stage_m']==0].copy()], axis=0) + df_zero_stage['note'] = np.where(df_zero_stage.note.isnull(), "0 Stage After Hydrotable Lookup", df_zero_stage.note) df_forecast = df_forecast[df_forecast['stage_m']!=0] - df_forecast = df_forecast.drop(columns=['huc8_branch', 'huc']) - df_forecast = df_forecast.set_index('hydro_id') + df_zero_stage.drop(columns=['discharge_cms', 'stage_m', 'rc_stage_m', 'rc_previous_stage_m', 'rc_previous_discharge_cms'], inplace=True) + df_forecast = df_forecast.join(df_hydro_max) print(f"{len(df_forecast)} reaches will be processed") - return df_forecast + return df_forecast, df_zero_stage def interpolate_stage(df_row, df_hydro): hydro_id = df_row['hydro_id'] - forecast = df_row['streamflow_cms'] + forecast = df_row['discharge_cms'] - if hydro_id not in df_hydro['HydroID'].values: + if hydro_id not in df_hydro['hydro_id'].values: return np.nan - subet_hydro = df_hydro.loc[df_hydro['HydroID']==hydro_id, ['discharge_cms', 'stage']] - discharge = subet_hydro['discharge_cms'].values - stage = subet_hydro['stage'].values + # 
Filter the hydrotable to this hydroid and pull out discharge and stages into arrays + subset_hydro = df_hydro.loc[df_hydro['hydro_id']==hydro_id, ['discharge_cms', 'stage_m']] + discharges = subset_hydro['discharge_cms'].values + stages = subset_hydro['stage_m'].values - interpolated_stage = round(np.interp(forecast, discharge, stage), 2) + # Get the interpolated stage by using the discharge forecast value against the arrays + interpolated_stage = round(np.interp(forecast, discharges, stages), 2) + + # Get the upper and lower values of the 1-ft hydrotable array that the current forecast / interpolated stage is at + hydrotable_index = np.searchsorted(discharges, forecast, side='right') + + # If streamflow exceeds the rating curve max, just use the max value + if hydrotable_index >= len(stages): + hydrotable_index = hydrotable_index - 1 + + hydrotable_previous_index = hydrotable_index-1 + rc_stage = stages[hydrotable_index] + rc_previous_stage = stages[hydrotable_previous_index] + rc_discharge = discharges[hydrotable_index] + rc_previous_discharge = discharges[hydrotable_previous_index] - return interpolated_stage + return interpolated_stage, rc_stage, rc_previous_stage, rc_discharge, rc_previous_discharge diff --git a/Core/LAMBDA/viz_functions/viz_db_ingest/lambda_function.py b/Core/LAMBDA/viz_functions/viz_db_ingest/lambda_function.py index b4d5524a..b1f3f43b 100644 --- a/Core/LAMBDA/viz_functions/viz_db_ingest/lambda_function.py +++ b/Core/LAMBDA/viz_functions/viz_db_ingest/lambda_function.py @@ -130,7 +130,7 @@ def lambda_handler(event, context): except Exception as e: print(f"Error: {e}") raise e - + connection.close() dump_dict = { "file": file, "target_table": target_table, diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_caching_templates/0a_redshift_create_inundation_tables_if_not_exist.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_caching_templates/0a_redshift_create_inundation_tables_if_not_exist.sql new file mode 100644 index 
00000000..089bc63c --- /dev/null +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_caching_templates/0a_redshift_create_inundation_tables_if_not_exist.sql @@ -0,0 +1,54 @@ +-- This creates the four tables on a Redshift db needed for a cached fim pipeline run. +-- These four tables exist on both RDS and Redshift, so any changes here will need to be synced with the RDS version as well - 0b_rds_create_inundation_tables_if_not_exist.sql +CREATE TABLE IF NOT EXISTS {rs_fim_table}_flows +( + feature_id integer, + hydro_id integer, + huc8 INTEGER, + branch bigint, + reference_time text, + discharge_cms double precision, + discharge_cfs double precision, + prc_status text, + PRIMARY KEY("hydro_id", "feature_id", "huc8", "branch") +); + +CREATE TABLE IF NOT EXISTS {rs_fim_table} ( + hydro_id integer, + feature_id integer, + huc8 integer, + branch bigint, + forecast_discharge_cfs double precision, + forecast_stage_ft double precision, + rc_discharge_cfs double precision, + rc_previous_discharge_cfs double precision, + rc_stage_ft double precision, + rc_previous_stage_ft double precision, + max_rc_discharge_cfs double precision, + max_rc_stage_ft double precision, + fim_version text, + reference_time text, + prc_method text, + PRIMARY KEY("hydro_id", "feature_id", "huc8", "branch") +) DISTSTYLE AUTO; + +CREATE TABLE IF NOT EXISTS {rs_fim_table}_geo ( + hydro_id integer, + feature_id integer, + huc8 INTEGER, + branch bigint, + rc_stage_ft integer, + geom_part integer, + geom geometry +) DISTSTYLE AUTO; + +CREATE TABLE IF NOT EXISTS {rs_fim_table}_zero_stage +( + feature_id integer, + hydro_id integer, + huc8 INTEGER, + branch bigint, + rc_discharge_cms double precision, + note text, + PRIMARY KEY("hydro_id", "feature_id", "huc8", "branch") +); diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_caching_templates/0b_rds_create_inundation_tables_if_not_exist.sql 
b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_caching_templates/0b_rds_create_inundation_tables_if_not_exist.sql new file mode 100644 index 00000000..115c18ff --- /dev/null +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_caching_templates/0b_rds_create_inundation_tables_if_not_exist.sql @@ -0,0 +1,70 @@ +-- This creates the four tables on a RDS db needed for a cached fim pipeline run. +-- These four tables exist on both RDS and Redshift, so any changes here will need to be synced with the Redshift version as well - 0a_redshift_create_inundation_tables_if_not_exist.sql +CREATE TABLE IF NOT EXISTS {db_fim_table}_flows +( + hydro_id integer, + feature_id integer, + huc8 integer, + branch bigint, + reference_time text, + discharge_cms double precision, + discharge_cfs double precision, + prc_status text +); + +CREATE TABLE IF NOT EXISTS {db_fim_table} +( + hydro_id integer, + feature_id integer, + huc8 integer, + branch bigint, + forecast_discharge_cfs double precision, + forecast_stage_ft double precision, + rc_discharge_cfs double precision, + rc_previous_discharge_cfs double precision, + rc_stage_ft integer, + rc_previous_stage_ft double precision, + max_rc_stage_ft double precision, + max_rc_discharge_cfs double precision, + fim_version text, + reference_time text, + prc_method text +); + +CREATE TABLE IF NOT EXISTS {db_fim_table}_geo ( + hydro_id integer, + feature_id integer, + huc8 integer, + branch bigint, + rc_stage_ft integer, + geom_part integer, + geom geometry(geometry, 3857) +); + +CREATE TABLE IF NOT EXISTS {db_fim_table}_zero_stage ( + hydro_id integer, + feature_id integer, + huc8 integer, + branch bigint, + rc_discharge_cms double precision, + note text +); + + -- Create a view that contains subdivided polygons in WKT text, for import into Redshift + CREATE OR REPLACE VIEW {db_fim_table}_geo_view AS + SELECT fim_subdivide.hydro_id, + fim_subdivide.feature_id, + fim_subdivide.huc8, + fim_subdivide.branch, + 
fim_subdivide.rc_stage_ft, + 0 AS geom_part, + st_astext(fim_subdivide.geom) AS geom_wkt + FROM ( SELECT fim.hydro_id, + fim.feature_id, + fim.huc8, + fim.branch, + fim.rc_stage_ft, + st_subdivide(fim_geo.geom) AS geom + FROM {db_fim_table} fim + JOIN {db_fim_table}_geo fim_geo ON fim.hydro_id = fim_geo.hydro_id AND fim.feature_id = fim_geo.feature_id AND fim.huc8 = fim_geo.huc8 AND fim.branch = fim_geo.branch AND fim.rc_stage_ft = fim_geo.rc_stage_ft + WHERE fim.prc_method = 'HAND_Processing'::text) fim_subdivide; diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_caching_templates/1a_rds_build_inundation_flows_table.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_caching_templates/1a_rds_build_inundation_flows_table.sql new file mode 100644 index 00000000..3658f0a8 --- /dev/null +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_caching_templates/1a_rds_build_inundation_flows_table.sql @@ -0,0 +1,22 @@ +-- This populates a standardized fim_flows table, filtered to high water threshold, on RDS. This is essentially the domain of a given fim run. +-- the prc_status columns is updated throughout the fim run with a status reflecting how fim is calculated for each reach (from ras2fim cache, from hand cache, hand processing, etc.) +-- This table is copied to Redshift in the next step (in order to query the cache there), but this table on RDS is the authoritative source as far as the prc_status column goes. 
+TRUNCATE {db_fim_table}_flows; +INSERT INTO {db_fim_table}_flows (feature_id, hydro_id, huc8, branch, reference_time, discharge_cms, discharge_cfs, prc_status) +SELECT + max_forecast.feature_id, + crosswalk.hydro_id, + crosswalk.huc8::integer, + crosswalk.branch_id AS branch, + to_char('1900-01-01 00:00:00'::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS reference_time, + max_forecast.discharge_cms, + max_forecast.discharge_cfs, + 'Pending' AS prc_status +FROM {max_flows_table} max_forecast +JOIN derived.recurrence_flows_conus rf ON rf.feature_id=max_forecast.feature_id +JOIN derived.fim4_featureid_crosswalk AS crosswalk ON max_forecast.feature_id = crosswalk.feature_id +WHERE + max_forecast.discharge_cfs >= rf.high_water_threshold AND + rf.high_water_threshold > 0::double precision AND + crosswalk.huc8 IS NOT NULL AND + crosswalk.lake_id = -999; \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_caching_templates/1b_redshift_copy_inundation_flows.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_caching_templates/1b_redshift_copy_inundation_flows.sql new file mode 100644 index 00000000..c4927f4d --- /dev/null +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_caching_templates/1b_redshift_copy_inundation_flows.sql @@ -0,0 +1,6 @@ +-- Copy the fim_flows table on RDS to Redshift - this allows querying the hand cache on redshift by joining to this table. 
+TRUNCATE {rs_fim_table}_flows; +INSERT INTO {rs_fim_table}_flows (feature_id, hydro_id, huc8, branch, reference_time, discharge_cms, discharge_cfs, prc_status) +SELECT + feature_id, hydro_id, huc8, branch, reference_time, discharge_cms, discharge_cfs, prc_status +FROM external_viz_{db_fim_table}_flows; \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_caching_templates/2a_redshift_query_cached_fim_table.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_caching_templates/2a_redshift_query_cached_fim_table.sql new file mode 100644 index 00000000..abeb6d91 --- /dev/null +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_caching_templates/2a_redshift_query_cached_fim_table.sql @@ -0,0 +1,49 @@ +-- This is the query that pulls cached hand fim from the cache on Redshift. It does this by joining to the just-populated flows table, with WHERE clauses on discharge +-- As of right now, feature_id, hydro_id, huc8, branch, and stage combine to represent a primary key in the hand hydrotables, so all of those fields are used in joins +-- (I've asked the fim team to hash a single unique id for feature_id, hydro_id, huc8, branch combinations... which will simplify these queries, and hopefully help with performance. 
+TRUNCATE {rs_fim_table}; +TRUNCATE {rs_fim_table}_geo; +TRUNCATE {rs_fim_table}_zero_stage; +INSERT INTO {rs_fim_table}(hydro_id, feature_id, huc8, branch, forecast_discharge_cfs, rc_discharge_cfs, rc_previous_discharge_cfs, rc_stage_ft, rc_previous_stage_ft, + max_rc_stage_ft, max_rc_discharge_cfs, fim_version, reference_time, prc_method) +SELECT + fs.hydro_id as hydro_id, + fs.feature_id as feature_id, + fs.huc8, + fs.branch, + fs.discharge_cfs AS forecast_discharge_cfs, + cf.rc_discharge_cfs, + cf.rc_previous_discharge_cfs, + cf.rc_stage_ft, + cf.rc_previous_stage_ft, + cfm.max_rc_stage_ft, + cfm.max_rc_discharge_cfs, + cfm.fim_version, + to_char('1900-01-01 00:00:00'::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS reference_time, + 'Cached' AS prc_method +FROM {rs_fim_table}_flows AS fs +JOIN fim.hydrotable_cached_max AS cfm ON fs.feature_id = cfm.feature_id AND fs.hydro_id = cfm.hydro_id AND fs.huc8 = cfm.huc8 AND fs.branch = cfm.branch +JOIN fim.hydrotable_cached AS cf ON fs.feature_id = cf.feature_id AND fs.hydro_id = cf.hydro_id AND fs.huc8 = cf.huc8 AND fs.branch = cf.branch +WHERE (fs.discharge_cfs <= cf.rc_discharge_cfs AND fs.discharge_cfs > cf.rc_previous_discharge_cfs) OR (fs.discharge_cfs >= cfm.max_rc_discharge_cfs); + +INSERT INTO {rs_fim_table}_geo(hydro_id, feature_id, huc8, branch, rc_stage_ft, geom_part, geom) +SELECT fim.hydro_id, fim.feature_id, fim.huc8, fim.branch, fim.rc_stage_ft, row_number() OVER ()::integer AS geom_part, geom +FROM {rs_fim_table} AS fim +JOIN fim.hydrotable_cached_geo AS cfg ON fim.feature_id = cfg.feature_id AND fim.hydro_id = cfg.hydro_id AND fim.huc8 = cfg.huc8 AND fim.branch = cfg.branch AND fim.rc_stage_ft = cfg.rc_stage_ft; + +INSERT INTO {rs_fim_table}_zero_stage(hydro_id, feature_id, huc8, branch, rc_discharge_cms, note) +SELECT zero_stage.hydro_id, zero_stage.feature_id, zero_stage.huc8, zero_stage.branch, zero_stage.rc_discharge_cms, zero_stage.note +FROM fim.hydrotable_cached_zero_stage AS 
zero_stage +JOIN {rs_fim_table}_flows AS Status +ON status.feature_id = zero_stage.feature_id AND status.hydro_id = zero_stage.hydro_id AND status.huc8 = zero_stage.huc8 AND status.branch = zero_stage.branch +WHERE (status.discharge_cms <= zero_stage.rc_discharge_cms) OR zero_stage.rc_discharge_cms = 0; + +UPDATE {rs_fim_table}_flows AS status +SET prc_status = 'Cached' +FROM {rs_fim_table} AS fim +WHERE status.feature_id = fim.feature_id AND status.hydro_id = fim.hydro_id AND status.huc8 = fim.huc8 AND status.branch = fim.branch; + +UPDATE {rs_fim_table}_flows AS status +SET prc_status = 'Zero_Stage' +FROM {rs_fim_table}_zero_stage AS zero_stage +WHERE status.feature_id = zero_stage.feature_id AND status.hydro_id = zero_stage.hydro_id AND status.huc8 = zero_stage.huc8 AND status.branch = zero_stage.branch; \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_caching_templates/3a_rds_ras2fim_insertion.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_caching_templates/3a_rds_ras2fim_insertion.sql new file mode 100644 index 00000000..2bf25280 --- /dev/null +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_caching_templates/3a_rds_ras2fim_insertion.sql @@ -0,0 +1,45 @@ +-- This SQL queries the ras2fim cache on RDS, and inserts appropriate rows into the fim tables of the given run. 
+TRUNCATE {db_fim_table}; +TRUNCATE {db_fim_table}_geo; +TRUNCATE {db_fim_table}_zero_stage; + +INSERT INTO {db_fim_table}( + hydro_id, feature_id, huc8, branch, forecast_discharge_cfs, + rc_discharge_cfs, rc_previous_discharge_cfs, rc_stage_ft, rc_previous_stage_ft, + max_rc_stage_ft, max_rc_discharge_cfs, fim_version, reference_time, prc_method +) + +SELECT + gc.feature_id as hydro_id, + gc.feature_id as feature_id, + fhc.huc8, + NULL as branch, + fs.discharge_cfs as forecast_discharge_cfs, + gc.discharge_cfs as rc_discharge_cfs, + gc.previous_discharge_cfs as rc_previous_discharge_cfs, + gc.stage_ft as rc_stage_ft, + gc.previous_stage_ft as rc_previous_stage_ft, + mgc.max_rc_stage_ft, + mgc.max_rc_discharge_cfs, + CONCAT ('ras2fim_', gc.version) as fim_version, + '{reference_time}' as reference_time, + 'Ras2FIM' AS prc_method +FROM ras2fim.geocurves gc +JOIN {db_fim_table}_flows fs ON fs.feature_id = gc.feature_id +JOIN derived.featureid_huc_crosswalk fhc ON fs.feature_id = fhc.feature_id +JOIN ras2fim.max_geocurves mgc ON gc.feature_id = mgc.feature_id +LEFT JOIN {db_fim_table} fim ON gc.feature_id = fim.feature_id +WHERE gc.discharge_cfs >= fs.discharge_cfs AND gc.previous_discharge_cfs < fs.discharge_cfs + AND fim.feature_id IS NULL; + +INSERT INTO {db_fim_table}_geo (hydro_id, feature_id, rc_stage_ft, geom_part, geom) +SELECT fim.hydro_id, fim.feature_id, fim.rc_stage_ft, row_number() OVER ()::integer AS geom_part, ST_Transform(gc.geom, 3857) as geom +FROM {db_fim_table} AS fim +JOIN ras2fim.geocurves AS gc ON fim.feature_id = gc.feature_id AND fim.rc_stage_ft = gc.stage_ft; + +-- Update the flows table prc_status column to reflect the features that were inserted from Ras2FIM cache. 
+UPDATE {db_fim_table}_flows AS flows +SET prc_status = 'Inserted FROM Ras2FIM Cache' +FROM {db_fim_table} AS fim +WHERE flows.feature_id = fim.feature_id AND flows.hydro_id = fim.hydro_id AND flows.huc8 = fim.huc8 AND flows.branch = fim.branch + AND fim.prc_method = 'Ras2FIM' \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_caching_templates/3b_rds_cached_hand_insertion.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_caching_templates/3b_rds_cached_hand_insertion.sql new file mode 100644 index 00000000..b62f0b5a --- /dev/null +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_caching_templates/3b_rds_cached_hand_insertion.sql @@ -0,0 +1,38 @@ +-- This SQL queries the just-updated hand cache table on RDS, and inserts appropriate rows into the fim tables of the given run. +INSERT INTO {db_fim_table}( + SELECT * FROM dblink('external_vpp_redshift', $REDSHIFT$ + SELECT hydro_id, feature_id, huc8, branch, forecast_discharge_cfs, forecast_stage_ft, rc_discharge_cfs, + rc_previous_discharge_cfs, rc_stage_ft, rc_previous_stage_ft, max_rc_stage_ft, max_rc_discharge_cfs, + fim_version, reference_time, prc_method + FROM {rs_fim_table}; + $REDSHIFT$) AS t1 (hydro_id integer, feature_id integer, huc8 integer, branch bigint, forecast_discharge_cfs double precision, forecast_stage_ft double precision, rc_discharge_cfs double precision, + rc_previous_discharge_cfs double precision, rc_stage_ft integer, rc_previous_stage_ft double precision, max_rc_stage_ft double precision, max_rc_discharge_cfs double precision, + fim_version text, reference_time text, prc_method text) +); + +INSERT INTO {db_fim_table}_geo( + SELECT * FROM dblink('external_vpp_redshift', $REDSHIFT$ + SELECT hydro_id, feature_id, huc8, branch, rc_stage_ft, geom_part, geom + FROM {rs_fim_table}_geo; + $REDSHIFT$) AS t1 (hydro_id integer, feature_id integer, huc8 integer, branch bigint, rc_stage_ft integer, geom_part integer, geom geometry) +); + 
+INSERT INTO {db_fim_table}_zero_stage( + SELECT * FROM dblink('external_vpp_redshift', $REDSHIFT$ + SELECT hydro_id , feature_id, huc8, branch, rc_discharge_cms, note + FROM {rs_fim_table}_zero_stage; + $REDSHIFT$) AS t1 (hydro_id integer, feature_id integer, huc8 integer, branch bigint, rc_discharge_cms double precision, note text) +); + +-- Update the flows table prc_status column to reflect the features that were inserted from Redshift cache. +UPDATE {db_fim_table}_flows AS flows +SET prc_status = 'Inserted FROM HAND Cache' +FROM {db_fim_table} AS fim +WHERE flows.feature_id = fim.feature_id AND flows.hydro_id = fim.hydro_id AND flows.huc8 = fim.huc8 AND flows.branch = fim.branch + AND fim.prc_method = 'Cached'; + +-- Update the flows table prc_status column to reflect the features that were inserted from Redshift cache. +UPDATE {db_fim_table}_flows AS flows +SET prc_status = 'Inserted FROM HAND Cache - Zero Stage' +FROM {db_fim_table}_zero_stage AS fim +WHERE flows.feature_id = fim.feature_id AND flows.hydro_id = fim.hydro_id AND flows.huc8 = fim.huc8 AND flows.branch = fim.branch \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_caching_templates/4a_rds_create_fim_publish_table.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_caching_templates/4a_rds_create_fim_publish_table.sql new file mode 100644 index 00000000..a90c72f2 --- /dev/null +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_caching_templates/4a_rds_create_fim_publish_table.sql @@ -0,0 +1,30 @@ +-- This is a generic / standardized query to create a publish.fim table for fim_config product processing (works for NWM configurations, but may not work for special fim configurations like RnR or CatFIM) +DROP TABLE IF EXISTS {db_publish_table}; + +SELECT + inun.hydro_id, + inun.hydro_id::TEXT AS hydro_id_str, + inun.feature_id, + inun.feature_id::TEXT AS feature_id_str, + inun.huc8, + inun.branch, + channels.strm_order, + channels.name, + 
channels.state, + inun.forecast_discharge_cfs as streamflow_cfs, + inun.rc_discharge_cfs, + inun.forecast_stage_ft as fim_stage_ft, + inun.rc_stage_ft, + inun.max_rc_stage_ft, + inun.max_rc_discharge_cfs, + inun.fim_version, + to_char('1900-01-01 00:00:00'::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS reference_time, + to_char('1900-01-01 00:00:00'::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS valid_time, + to_char(now()::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS update_time, + ST_COLLECT(geo.geom) as geom +INTO {db_publish_table} +FROM {db_fim_table} as inun +JOIN {db_fim_table}_geo as geo ON inun.feature_id = geo.feature_id AND inun.hydro_id = geo.hydro_id AND inun.huc8 = geo.huc8 AND inun.branch = geo.branch +LEFT JOIN derived.channels_{domain} as channels ON channels.feature_id = inun.feature_id +GROUP BY inun.hydro_id, inun.feature_id, inun.huc8, inun.branch, channels.strm_order, channels.name, channels.state, inun.forecast_discharge_cfs, inun.forecast_stage_ft, + inun.rc_discharge_cfs,inun.rc_stage_ft,inun.max_rc_stage_ft,inun.max_rc_discharge_cfs,inun.fim_version; \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_caching_templates/5a_redshift_cache_fim_from_rds.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_caching_templates/5a_redshift_cache_fim_from_rds.sql new file mode 100644 index 00000000..a103c7cb --- /dev/null +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_caching_templates/5a_redshift_cache_fim_from_rds.sql @@ -0,0 +1,65 @@ +-- This template is designed to add freshly processed FIM polygons (which don't already exist in the cache) in the current FIM run back into to the cached hand tables on Redshift. 
+-- To ensure that no duplicates are added to the cache (which could be possible if multiple fim configurations are running at the same time), this query joins to the target table and ensures that +-- the current hydrotable record doesn't alraedy exist in the cache. This slows down the query significantly, and there is likely a potential optimization here... possibly using the UPSERT functionality of Redshift. +-- As of right now, feature_id, hydro_id, huc8, branch, and stage combine to represent a primary key in the hand hydrotables, so all of those fields are used in joins +-- (I've asked the fim team to hash a single unique id for feature_id, hydro_id, huc8, branch combinations... which will simplify these queries, and hopefully help with performance. + +-- 1. Add unique feature_id/hydro_id records to the hydrotable_cached_max table +INSERT INTO fim.hydrotable_cached_max(hydro_id, feature_id, huc8, branch, fim_version, max_rc_discharge_cfs, max_rc_stage_ft) +SELECT + fim.hydro_id, + fim.feature_id, + fim.huc8, + fim.branch, + fim.fim_version, + fim.max_rc_discharge_cfs, + fim.max_rc_stage_ft +FROM {postgis_fim_table} AS fim +LEFT OUTER JOIN fim.hydrotable_cached_max AS hcm ON fim.hydro_id = hcm.hydro_id AND fim.feature_id = hcm.feature_id AND fim.huc8 = hcm.huc8 AND fim.branch = hcm.branch +WHERE fim.prc_method = 'HAND_Processing' AND +hcm.hydro_id IS NULL +GROUP BY fim.hydro_id, fim.feature_id, fim.huc8, fim.branch, fim.fim_version, fim.max_rc_discharge_cfs, fim.max_rc_stage_ft; + +-- 2. 
Add records for each step of the hydrotable to the hydrotable_cached table +INSERT INTO fim.hydrotable_cached (hydro_id, feature_id, huc8, branch, rc_discharge_cfs, rc_previous_discharge_cfs, rc_stage_ft, rc_previous_stage_ft) +SELECT + fim.hydro_id, + fim.feature_id, + fim.huc8, + fim.branch, + fim.rc_discharge_cfs, + fim.rc_previous_discharge_cfs, + fim.rc_stage_ft, + fim.rc_previous_stage_ft +FROM {postgis_fim_table} AS fim +LEFT OUTER JOIN fim.hydrotable_cached AS hc ON fim.hydro_id = hc.hydro_id AND fim.rc_stage_ft = hc.rc_stage_ft AND fim.feature_id = hc.feature_id AND fim.huc8 = hc.huc8 AND fim.branch = hc.branch +WHERE fim.prc_method = 'HAND_Processing' AND +hc.rc_stage_ft IS NULL; + +-- 3. Add records for each subdivided part of the geometry to hydrotable_cached_geo table +INSERT INTO fim.hydrotable_cached_geo (hydro_id, feature_id, huc8, branch, rc_stage_ft, geom_part, geom) +SELECT + fim.hydro_id, + fim.feature_id, + fim.huc8, + fim.branch, + fim.rc_stage_ft, + fim.geom_part, + ST_GeomFromText(geom_wkt) +FROM {postgis_fim_table}_geo_view AS fim +JOIN fim.hydrotable_cached_max AS hcm ON fim.hydro_id = hcm.hydro_id AND fim.feature_id = hcm.feature_id AND fim.huc8 = hcm.huc8 AND fim.branch = hcm.branch +LEFT OUTER JOIN fim.hydrotable_cached_geo AS hcg ON fim.hydro_id = hcg.hydro_id AND fim.rc_stage_ft = hcg.rc_stage_ft AND fim.feature_id = hcg.feature_id AND fim.huc8 = hcg.huc8 AND fim.branch = hcg.branch +WHERE hcg.rc_stage_ft IS NULL; + +-- 4. 
Add records for zero_stage features to zero stage table +INSERT INTO fim.hydrotable_cached_zero_stage (hydro_id, feature_id, huc8, branch, rc_discharge_cms, note) +SELECT + fim.hydro_id, + fim.feature_id, + fim.huc8, + fim.branch, + fim.rc_discharge_cms, + fim.note +FROM {postgis_fim_table}_zero_stage AS fim +LEFT OUTER JOIN fim.hydrotable_cached_zero_stage AS hczs ON fim.hydro_id = hczs.hydro_id AND fim.feature_id = hczs.feature_id AND fim.huc8 = hczs.huc8 AND fim.branch = hczs.branch +WHERE hczs.rc_discharge_cms IS NULL; \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_configs/ana_inundation.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_configs/ana_inundation.sql deleted file mode 100644 index 1d602002..00000000 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_configs/ana_inundation.sql +++ /dev/null @@ -1,24 +0,0 @@ -DROP TABLE IF EXISTS publish.ana_inundation; - -SELECT - inun.hydro_id, - inun.hydro_id_str::TEXT AS hydro_id_str, - inun.branch, - inun.feature_id, - inun.feature_id_str::TEXT AS feature_id_str, - inun.streamflow_cfs, - inun.fim_stage_ft, - inun.max_rc_stage_ft, - inun.max_rc_discharge_cfs, - inun.fim_version, - to_char('1900-01-01 00:00:00'::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS reference_time, - to_char('1900-01-01 00:00:00'::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS valid_time, - inun.huc8, - inun.geom, - to_char(now()::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS update_time, - channels.strm_order, - channels.name, - channels.state -INTO publish.ana_inundation -FROM ingest.ana_inundation as inun -left join derived.channels_conus as channels ON channels.feature_id = inun.feature_id; diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_configs/ana_inundation_hi.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_configs/ana_inundation_hi.sql deleted file mode 100644 index adc7f6c4..00000000 --- 
a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_configs/ana_inundation_hi.sql +++ /dev/null @@ -1,29 +0,0 @@ ---Add an empty row so that service monitor will pick up a reference and update time in the event of no fim features -INSERT INTO ingest.ana_inundation_hi( - hydro_id, hydro_id_str, geom, branch, feature_id, feature_id_str, streamflow_cfs, fim_stage_ft, max_rc_stage_ft, max_rc_discharge_cfs, fim_version, reference_time, huc8) - VALUES (-9999, '-9999', NULL, 'NA', -9999, '-9999', -9999, -9999, -9999, -9999, 'NA', to_char('1900-01-01 00:00:00'::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC'), '-9999'); - -DROP TABLE IF EXISTS publish.ana_inundation_hi; - -SELECT - inun.hydro_id, - inun.hydro_id_str::TEXT AS hydro_id_str, - inun.branch, - inun.feature_id, - inun.feature_id_str::TEXT AS feature_id_str, - inun.streamflow_cfs, - inun.fim_stage_ft, - inun.max_rc_stage_ft, - inun.max_rc_discharge_cfs, - inun.fim_version, - to_char('1900-01-01 00:00:00'::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS reference_time, - to_char('1900-01-01 00:00:00'::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS valid_time, - inun.huc8, - inun.geom, - to_char(now()::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS update_time, - channels.strm_order, - channels.name, - 'HI' as state -INTO publish.ana_inundation_hi -FROM ingest.ana_inundation_hi as inun -left join derived.channels_hi AS channels ON channels.feature_id = inun.feature_id diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_configs/ana_inundation_prvi.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_configs/ana_inundation_prvi.sql deleted file mode 100644 index 23c9384b..00000000 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_configs/ana_inundation_prvi.sql +++ /dev/null @@ -1,29 +0,0 @@ ---Add an empty row so that service monitor will pick up a reference and update time in the event of no fim features -INSERT INTO 
ingest.ana_inundation_prvi( - hydro_id, hydro_id_str, geom, branch, feature_id, feature_id_str, streamflow_cfs, fim_stage_ft, max_rc_stage_ft, max_rc_discharge_cfs, fim_version, reference_time, huc8) - VALUES (-9999, '-9999', NULL, 'NA', -9999, '-9999', -9999, -9999, -9999, -9999, 'NA', to_char('1900-01-01 00:00:00'::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC'), '-9999'); - -DROP TABLE IF EXISTS publish.ana_inundation_prvi; - -SELECT - inun.hydro_id, - inun.hydro_id_str::TEXT AS hydro_id_str, - inun.branch, - inun.feature_id, - inun.feature_id_str::TEXT AS feature_id_str, - inun.streamflow_cfs, - inun.fim_stage_ft, - inun.max_rc_stage_ft, - inun.max_rc_discharge_cfs, - inun.fim_version, - to_char('1900-01-01 00:00:00'::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS reference_time, - to_char('1900-01-01 00:00:00'::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS valid_time, - inun.huc8, - inun.geom, - to_char(now()::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS update_time, - channels.strm_order, - channels.name, - 'PRVI' AS state -INTO publish.ana_inundation_prvi -FROM ingest.ana_inundation_prvi as inun -left join derived.channels_prvi as channels ON channels.feature_id = inun.feature_id diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_configs/ana_past_14day_max_inundation.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_configs/ana_past_14day_max_inundation.sql deleted file mode 100644 index 7febffc6..00000000 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_configs/ana_past_14day_max_inundation.sql +++ /dev/null @@ -1,25 +0,0 @@ -DROP TABLE IF EXISTS publish.ana_past_14day_max_inundation; - -SELECT - inun.hydro_id, - inun.hydro_id_str::TEXT AS hydro_id_str, - inun.branch, - inun.feature_id, - inun.feature_id_str::TEXT AS feature_id_str, - inun.streamflow_cfs, - inun.fim_stage_ft, - inun.max_rc_stage_ft, - inun.max_rc_discharge_cfs, - inun.fim_version, - to_char('1900-01-01 
00:00:00'::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS reference_time, - to_char('1900-01-01 00:00:00'::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS valid_time, - inun.huc8, - inun.geom, - to_char(now()::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS update_time, - channels.strm_order, - channels.name, - channels.state, - 'ana_past_14day' AS config -INTO publish.ana_past_14day_max_inundation -FROM ingest.ana_past_14day_max_inundation as inun -left join derived.channels_conus as channels ON channels.feature_id = inun.feature_id; diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_configs/ana_past_7day_max_inundation.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_configs/ana_past_7day_max_inundation.sql deleted file mode 100644 index b88d57f7..00000000 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_configs/ana_past_7day_max_inundation.sql +++ /dev/null @@ -1,25 +0,0 @@ -DROP TABLE IF EXISTS publish.ana_past_7day_max_inundation; - -SELECT - inun.hydro_id, - inun.hydro_id_str::TEXT AS hydro_id_str, - inun.branch, - inun.feature_id, - inun.feature_id_str::TEXT AS feature_id_str, - inun.streamflow_cfs, - inun.fim_stage_ft, - inun.max_rc_stage_ft, - inun.max_rc_discharge_cfs, - inun.fim_version, - to_char('1900-01-01 00:00:00'::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS reference_time, - to_char('1900-01-01 00:00:00'::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS valid_time, - inun.huc8, - inun.geom, - to_char(now()::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS update_time, - channels.strm_order, - channels.name, - channels.state, - 'ana_past_7day' AS config -INTO publish.ana_past_7day_max_inundation -FROM ingest.ana_past_7day_max_inundation as inun -left join derived.channels_conus as channels ON channels.feature_id = inun.feature_id; diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_configs/mrf_gfs_max_inundation_10day.sql 
b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_configs/mrf_gfs_max_inundation_10day.sql deleted file mode 100644 index 69796b51..00000000 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_configs/mrf_gfs_max_inundation_10day.sql +++ /dev/null @@ -1,24 +0,0 @@ -DROP TABLE IF EXISTS publish.mrf_gfs_max_inundation_10day; - -SELECT - inun.hydro_id, - inun.hydro_id_str::TEXT AS hydro_id_str, - inun.branch, - inun.feature_id, - inun.feature_id_str::TEXT AS feature_id_str, - inun.streamflow_cfs, - inun.fim_stage_ft, - inun.max_rc_stage_ft, - inun.max_rc_discharge_cfs, - inun.fim_version, - to_char('1900-01-01 00:00:00'::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS reference_time, - inun.huc8, - inun.geom, - to_char(now()::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS update_time, - channels.strm_order, - channels.name, - channels.state, - 'mrf_10day' AS config -INTO publish.mrf_gfs_max_inundation_10day -FROM ingest.mrf_gfs_max_inundation_10day as inun -left join derived.channels_conus as channels ON inun.feature_id = channels.feature_id; diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_configs/mrf_gfs_max_inundation_3day.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_configs/mrf_gfs_max_inundation_3day.sql deleted file mode 100644 index dae45d00..00000000 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_configs/mrf_gfs_max_inundation_3day.sql +++ /dev/null @@ -1,24 +0,0 @@ -DROP TABLE IF EXISTS publish.mrf_gfs_max_inundation_3day; - -SELECT - inun.hydro_id, - inun.hydro_id_str::TEXT AS hydro_id_str, - inun.branch, - inun.feature_id, - inun.feature_id_str::TEXT AS feature_id_str, - inun.streamflow_cfs, - inun.fim_stage_ft, - inun.max_rc_stage_ft, - inun.max_rc_discharge_cfs, - inun.fim_version, - to_char('1900-01-01 00:00:00'::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS reference_time, - inun.huc8, - inun.geom, - to_char(now()::timestamp without time zone, 'YYYY-MM-DD 
HH24:MI:SS UTC') AS update_time, - channels.strm_order, - channels.name, - channels.state, - 'mrf_3day' AS config -INTO publish.mrf_gfs_max_inundation_3day -FROM ingest.mrf_gfs_max_inundation_3day as inun -left join derived.channels_conus as channels ON inun.feature_id = channels.feature_id; diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_configs/mrf_gfs_max_inundation_5day.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_configs/mrf_gfs_max_inundation_5day.sql deleted file mode 100644 index e55d41ed..00000000 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_configs/mrf_gfs_max_inundation_5day.sql +++ /dev/null @@ -1,24 +0,0 @@ -DROP TABLE IF EXISTS publish.mrf_gfs_max_inundation_5day; - -SELECT - inun.hydro_id, - inun.hydro_id_str::TEXT AS hydro_id_str, - inun.branch, - inun.feature_id, - inun.feature_id_str::TEXT AS feature_id_str, - inun.streamflow_cfs, - inun.fim_stage_ft, - inun.max_rc_stage_ft, - inun.max_rc_discharge_cfs, - inun.fim_version, - to_char('1900-01-01 00:00:00'::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS reference_time, - inun.huc8, - inun.geom, - to_char(now()::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS update_time, - channels.strm_order, - channels.name, - channels.state, - 'mrf_5day' AS config -INTO publish.mrf_gfs_max_inundation_5day -FROM ingest.mrf_gfs_max_inundation_5day as inun -left join derived.channels_conus as channels ON inun.feature_id = channels.feature_id; diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_configs/mrf_nbm_max_inundation_10day.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_configs/mrf_nbm_max_inundation_10day.sql deleted file mode 100644 index 9658e25f..00000000 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_configs/mrf_nbm_max_inundation_10day.sql +++ /dev/null @@ -1,24 +0,0 @@ -DROP TABLE IF EXISTS publish.mrf_nbm_max_inundation_10day; - -SELECT - inun.hydro_id, - inun.hydro_id_str::TEXT AS 
hydro_id_str, - inun.branch, - inun.feature_id, - inun.feature_id_str::TEXT AS feature_id_str, - inun.streamflow_cfs, - inun.fim_stage_ft, - inun.max_rc_stage_ft, - inun.max_rc_discharge_cfs, - inun.fim_version, - to_char('1900-01-01 00:00:00'::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS reference_time, - inun.huc8, - inun.geom, - to_char(now()::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS update_time, - channels.strm_order, - channels.name, - channels.state, - 'mrf_10day' AS config -INTO publish.mrf_nbm_max_inundation_10day -FROM ingest.mrf_nbm_max_inundation_10day as inun -left join derived.channels_conus as channels ON inun.feature_id = channels.feature_id; diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_configs/mrf_nbm_max_inundation_3day.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_configs/mrf_nbm_max_inundation_3day.sql deleted file mode 100644 index f0de42c3..00000000 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_configs/mrf_nbm_max_inundation_3day.sql +++ /dev/null @@ -1,24 +0,0 @@ -DROP TABLE IF EXISTS publish.mrf_nbm_max_inundation_3day; - -SELECT - inun.hydro_id, - inun.hydro_id_str::TEXT AS hydro_id_str, - inun.branch, - inun.feature_id, - inun.feature_id_str::TEXT AS feature_id_str, - inun.streamflow_cfs, - inun.fim_stage_ft, - inun.max_rc_stage_ft, - inun.max_rc_discharge_cfs, - inun.fim_version, - to_char('1900-01-01 00:00:00'::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS reference_time, - inun.huc8, - inun.geom, - to_char(now()::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS update_time, - channels.strm_order, - channels.name, - channels.state, - 'mrf_3day' AS config -INTO publish.mrf_nbm_max_inundation_3day -FROM ingest.mrf_nbm_max_inundation_3day as inun -left join derived.channels_conus as channels ON inun.feature_id = channels.feature_id; diff --git 
a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_configs/mrf_nbm_max_inundation_5day.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_configs/mrf_nbm_max_inundation_5day.sql deleted file mode 100644 index 96d95938..00000000 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_configs/mrf_nbm_max_inundation_5day.sql +++ /dev/null @@ -1,24 +0,0 @@ -DROP TABLE IF EXISTS publish.mrf_nbm_max_inundation_5day; - -SELECT - inun.hydro_id, - inun.hydro_id_str::TEXT AS hydro_id_str, - inun.branch, - inun.feature_id, - inun.feature_id_str::TEXT AS feature_id_str, - inun.streamflow_cfs, - inun.fim_stage_ft, - inun.max_rc_stage_ft, - inun.max_rc_discharge_cfs, - inun.fim_version, - to_char('1900-01-01 00:00:00'::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS reference_time, - inun.huc8, - inun.geom, - to_char(now()::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS update_time, - channels.strm_order, - channels.name, - channels.state, - 'mrf_5day' AS config -INTO publish.mrf_nbm_max_inundation_5day -FROM ingest.mrf_nbm_max_inundation_5day as inun -left join derived.channels_conus as channels ON inun.feature_id = channels.feature_id; diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_configs/rfc_based_5day_max_inundation.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_configs/rfc_based_5day_max_inundation.sql index 79e766c4..e70c9984 100644 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_configs/rfc_based_5day_max_inundation.sql +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_configs/rfc_based_5day_max_inundation.sql @@ -2,25 +2,30 @@ DROP TABLE IF EXISTS publish.rfc_based_5day_max_inundation; SELECT inun.hydro_id, - inun.hydro_id_str::TEXT AS hydro_id_str, - inun.branch, + inun.hydro_id::TEXT AS hydro_id_str, inun.feature_id, - inun.feature_id_str::TEXT AS feature_id_str, - inun.streamflow_cfs, - inun.fim_stage_ft, - inun.max_rc_stage_ft, - inun.max_rc_discharge_cfs, - 
inun.fim_version, - to_char('1900-01-01 00:00:00'::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS reference_time, + inun.feature_id::TEXT AS feature_id_str, inun.huc8, - inun.geom, - to_char(now()::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS update_time, + inun.branch, channels.strm_order, channels.name, channels.state, - rnr_flow.influental_forecast_text AS inherited_rfc_forecasts, - rnr_flow.viz_status AS max_status + inun.forecast_discharge_cfs as streamflow_cfs, + inun.rc_discharge_cfs, + inun.forecast_stage_ft as fim_stage_ft, + inun.rc_stage_ft, + inun.max_rc_stage_ft, + inun.max_rc_discharge_cfs, + inun.fim_version, + rnr_flow.influental_forecast_text AS inherited_rfc_forecasts, + rnr_flow.viz_status AS max_status, + to_char('1900-01-01 00:00:00'::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS reference_time, + to_char(now()::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS update_time, + ST_COLLECT(geo.geom) as geom INTO publish.rfc_based_5day_max_inundation FROM ingest.rfc_based_5day_max_inundation as inun +JOIN ingest.rfc_based_5day_max_inundation_geo as geo ON inun.feature_id = geo.feature_id AND inun.hydro_id = geo.hydro_id AND inun.huc8 = geo.huc8 AND inun.branch = geo.branch JOIN publish.rfc_based_5day_max_streamflow rnr_flow ON rnr_flow.feature_id = inun.feature_id -LEFT JOIN derived.channels_conus as channels ON channels.feature_id = inun.feature_id; \ No newline at end of file +LEFT JOIN derived.channels_conus as channels ON channels.feature_id = inun.feature_id +GROUP BY inun.hydro_id, inun.feature_id, inun.huc8, inun.branch, channels.strm_order, channels.name, channels.state, inun.forecast_discharge_cfs, + inun.rc_discharge_cfs,inun.rc_stage_ft,inun.max_rc_stage_ft,inun.max_rc_discharge_cfs,inun.fim_version; \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_configs/srf_18hr_max_inundation.sql 
b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_configs/srf_18hr_max_inundation.sql deleted file mode 100644 index ca759cee..00000000 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_configs/srf_18hr_max_inundation.sql +++ /dev/null @@ -1,23 +0,0 @@ -DROP TABLE IF EXISTS publish.srf_18hr_max_inundation; - -SELECT - inun.hydro_id, - inun.hydro_id_str::TEXT AS hydro_id_str, - inun.branch, - inun.feature_id, - inun.feature_id_str::TEXT AS feature_id_str, - inun.streamflow_cfs, - inun.fim_stage_ft, - inun.max_rc_stage_ft, - inun.max_rc_discharge_cfs, - inun.fim_version, - to_char('1900-01-01 00:00:00'::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS reference_time, - inun.huc8, - inun.geom, - to_char(now()::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS update_time, - channels.strm_order, - channels.name, - channels.state -INTO publish.srf_18hr_max_inundation -FROM ingest.srf_18hr_max_inundation as inun -left join derived.channels_conus as channels ON channels.feature_id = inun.feature_id; diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_configs/srf_48hr_max_inundation_hi.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_configs/srf_48hr_max_inundation_hi.sql deleted file mode 100644 index 16c913dd..00000000 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_configs/srf_48hr_max_inundation_hi.sql +++ /dev/null @@ -1,28 +0,0 @@ ---Add an empty row so that service monitor will pick up a reference and update time in the event of no fim features -INSERT INTO ingest.srf_48hr_max_inundation_hi( - hydro_id, hydro_id_str, geom, branch, feature_id, feature_id_str, streamflow_cfs, fim_stage_ft, max_rc_stage_ft, max_rc_discharge_cfs, fim_version, reference_time, huc8) - VALUES (-9999, '-9999', NULL, 'NA', -9999, '-9999', -9999, -9999, -9999, -9999, 'NA', to_char('1900-01-01 00:00:00'::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC'), '-9999'); - -DROP TABLE IF EXISTS 
publish.srf_48hr_max_inundation_hi; - -SELECT - inun.hydro_id, - inun.hydro_id_str::TEXT AS hydro_id_str, - inun.branch, - inun.feature_id, - inun.feature_id_str::TEXT AS feature_id_str, - inun.streamflow_cfs, - inun.fim_stage_ft, - inun.max_rc_stage_ft, - inun.max_rc_discharge_cfs, - inun.fim_version, - to_char('1900-01-01 00:00:00'::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS reference_time, - inun.huc8, - inun.geom, - to_char(now()::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS update_time, - channels.strm_order, - channels.name, - 'HI' AS state -INTO publish.srf_48hr_max_inundation_hi -FROM ingest.srf_48hr_max_inundation_hi as inun -left join derived.channels_hi as channels ON channels.feature_id = inun.feature_id; \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_configs/srf_48hr_max_inundation_prvi.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_configs/srf_48hr_max_inundation_prvi.sql deleted file mode 100644 index 2b66888d..00000000 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_configs/srf_48hr_max_inundation_prvi.sql +++ /dev/null @@ -1,28 +0,0 @@ ---Add an empty row so that service monitor will pick up a reference and update time in the event of no fim features -INSERT INTO ingest.srf_48hr_max_inundation_prvi( - hydro_id, hydro_id_str, geom, branch, feature_id, feature_id_str, streamflow_cfs, fim_stage_ft, max_rc_stage_ft, max_rc_discharge_cfs, fim_version, reference_time, huc8) - VALUES (-9999, '-9999', NULL, 'NA', -9999, '-9999', -9999, -9999, -9999, -9999, 'NA', to_char('1900-01-01 00:00:00'::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC'), '-9999'); - -DROP TABLE IF EXISTS publish.srf_48hr_max_inundation_prvi; - -SELECT - inun.hydro_id, - inun.hydro_id_str::TEXT AS hydro_id_str, - inun.branch, - inun.feature_id, - inun.feature_id_str::TEXT AS feature_id_str, - inun.streamflow_cfs, - inun.fim_stage_ft, - inun.max_rc_stage_ft, - 
inun.max_rc_discharge_cfs, - inun.fim_version, - to_char('1900-01-01 00:00:00'::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS reference_time, - inun.huc8, - inun.geom, - to_char(now()::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS update_time, - channels.strm_order, - channels.name, - 'PRVI' AS state -INTO publish.srf_48hr_max_inundation_prvi -FROM ingest.srf_48hr_max_inundation_prvi as inun -left join derived.channels_prvi as channels ON channels.feature_id = inun.feature_id; \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/flow_based_catfim_action.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_flows/flow_based_catfim_action.sql similarity index 100% rename from Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/flow_based_catfim_action.sql rename to Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_flows/flow_based_catfim_action.sql diff --git a/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/flow_based_catfim_major.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_flows/flow_based_catfim_major.sql similarity index 100% rename from Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/flow_based_catfim_major.sql rename to Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_flows/flow_based_catfim_major.sql diff --git a/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/flow_based_catfim_minor.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_flows/flow_based_catfim_minor.sql similarity index 100% rename from Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/flow_based_catfim_minor.sql rename to Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_flows/flow_based_catfim_minor.sql diff --git a/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/flow_based_catfim_moderate.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_flows/flow_based_catfim_moderate.sql similarity index 100% rename from 
Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/flow_based_catfim_moderate.sql rename to Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_flows/flow_based_catfim_moderate.sql diff --git a/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/flow_based_catfim_record.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_flows/flow_based_catfim_record.sql similarity index 100% rename from Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/flow_based_catfim_record.sql rename to Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_flows/flow_based_catfim_record.sql diff --git a/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/rf_10_inundation.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_flows/rf_10_inundation.sql similarity index 100% rename from Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/rf_10_inundation.sql rename to Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_flows/rf_10_inundation.sql diff --git a/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/rf_25_inundation.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_flows/rf_25_inundation.sql similarity index 100% rename from Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/rf_25_inundation.sql rename to Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_flows/rf_25_inundation.sql diff --git a/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/rf_2_inundation.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_flows/rf_2_inundation.sql similarity index 100% rename from Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/rf_2_inundation.sql rename to Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_flows/rf_2_inundation.sql diff --git a/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/rf_50_inundation.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_flows/rf_50_inundation.sql similarity index 100% rename from Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/rf_50_inundation.sql rename to 
Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_flows/rf_50_inundation.sql diff --git a/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/rf_5_inundation.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_flows/rf_5_inundation.sql similarity index 100% rename from Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/rf_5_inundation.sql rename to Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_flows/rf_5_inundation.sql diff --git a/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/rf_high_water_inundation.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_flows/rf_high_water_inundation.sql similarity index 100% rename from Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/rf_high_water_inundation.sql rename to Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_flows/rf_high_water_inundation.sql diff --git a/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/rfc_based_5day_max_inundation.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_flows/rfc_based_5day_max_inundation.sql similarity index 100% rename from Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/rfc_based_5day_max_inundation.sql rename to Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_flows/rfc_based_5day_max_inundation.sql diff --git a/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/stage_based_catfim_action.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_flows/stage_based_catfim_action.sql similarity index 100% rename from Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/stage_based_catfim_action.sql rename to Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_flows/stage_based_catfim_action.sql diff --git a/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/stage_based_catfim_major.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_flows/stage_based_catfim_major.sql similarity index 100% rename from Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/stage_based_catfim_major.sql rename to 
Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_flows/stage_based_catfim_major.sql diff --git a/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/stage_based_catfim_minor.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_flows/stage_based_catfim_minor.sql similarity index 100% rename from Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/stage_based_catfim_minor.sql rename to Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_flows/stage_based_catfim_minor.sql diff --git a/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/stage_based_catfim_moderate.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_flows/stage_based_catfim_moderate.sql similarity index 100% rename from Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/stage_based_catfim_moderate.sql rename to Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_flows/stage_based_catfim_moderate.sql diff --git a/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/stage_based_catfim_record.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_flows/stage_based_catfim_record.sql similarity index 100% rename from Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/stage_based_catfim_record.sql rename to Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_flows/stage_based_catfim_record.sql diff --git a/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/static_hand_catchments_0_branches.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_flows/static_hand_catchments_0_branches.sql similarity index 100% rename from Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/static_hand_catchments_0_branches.sql rename to Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_flows/static_hand_catchments_0_branches.sql diff --git a/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/static_hand_catchments_0_branches_hi.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_flows/static_hand_catchments_0_branches_hi.sql similarity index 100% rename from 
Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/static_hand_catchments_0_branches_hi.sql rename to Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_flows/static_hand_catchments_0_branches_hi.sql diff --git a/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/static_hand_catchments_0_branches_prvi.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_flows/static_hand_catchments_0_branches_prvi.sql similarity index 100% rename from Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/static_hand_catchments_0_branches_prvi.sql rename to Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_flows/static_hand_catchments_0_branches_prvi.sql diff --git a/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/static_hand_catchments_gms_branches.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_flows/static_hand_catchments_gms_branches.sql similarity index 100% rename from Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/static_hand_catchments_gms_branches.sql rename to Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_flows/static_hand_catchments_gms_branches.sql diff --git a/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/static_hand_catchments_gms_branches_hi.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_flows/static_hand_catchments_gms_branches_hi.sql similarity index 100% rename from Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/static_hand_catchments_gms_branches_hi.sql rename to Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_flows/static_hand_catchments_gms_branches_hi.sql diff --git a/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/static_hand_catchments_gms_branches_prvi.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_flows/static_hand_catchments_gms_branches_prvi.sql similarity index 100% rename from Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/static_hand_catchments_gms_branches_prvi.sql rename to 
Core/LAMBDA/viz_functions/viz_db_postprocess_sql/fim_flows/static_hand_catchments_gms_branches_prvi.sql diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/lambda_function.py b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/lambda_function.py index 79746290..bf40edb8 100644 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/lambda_function.py +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/lambda_function.py @@ -1,5 +1,6 @@ import re import os +from datetime import datetime from viz_classes import database def lambda_handler(event, context): @@ -8,46 +9,104 @@ def lambda_handler(event, context): reference_time = event['args']['reference_time'] sql_replace = event['args']['sql_rename_dict'] sql_replace.update({'1900-01-01 00:00:00': reference_time}) #setup a replace dictionary, starting with the reference time of the current pipeline. + check_dependencies = True #default value, unless specified elsewhere # Don't run any SQL if it's a reference service if step in ["products", "fim_config"]: if event['args']['product']['configuration'] == "reference": return - # Admin tasks + # For FIM steps, setup some other variables based on the step function inputs and provide them to the sql_replace dictionary as args/params + if step == "hand_pre_processing" or step == "hand_post_processing" or step == "fim_config": + # Define the table names that will be used in the SQL templates + # TODO: Move this to initialize pipeline and Update this to work with past event functionality? 
+ max_flows_table = event['args']['fim_config']['flows_table'] + db_fim_table = event['args']['fim_config']['target_table'] + rs_fim_table = db_fim_table.replace("ingest", "ingest_rs") + domain = event['args']['product']['domain'] + sql_replace.update({"{max_flows_table}":max_flows_table}) + sql_replace.update({"{db_fim_table}":db_fim_table}) + sql_replace.update({"{rs_fim_table}":rs_fim_table}) + sql_replace.update({"{domain}":domain}) + sql_replace.update({"{postgis_fim_table}":db_fim_table.replace("ingest", "external_viz_ingest")}) + sql_replace.update({"{db_publish_table}":db_fim_table.replace("ingest", "publish")}) + + ############################################################ Conditional Logic ########################################################## + # This section contains the conditional logic of database operations within our pipelline. At some point it may be nice to abstract this. + + # General RDS DB Admin tasks if folder == 'admin': + db_type = "viz" run_admin_tasks(event, folder, step, sql_replace, reference_time) else: + sql_files_to_run =[] # Max Flow if step == "max_flows": + db_type = "viz" sql_file = event['args']['db_max_flow']['max_flows_sql_file'] - # FIM Config + sql_files_to_run.append({"sql_file":sql_file, "db_type":db_type}) + + ###################### FIM Workflows ###################### + # All of the pre and post processing steps of a fim workflow (everything but step 4) - see attached readme - are templated, and can be executed using the input parameters defined in the step function + elif step == "hand_pre_processing" or step == "hand_post_processing": + # Get the sql file instructions from the step function parameter dictionary, and add it to the list to run + sql_templates_to_run = event['sql_templates_to_run'] + sql_files_to_run.extend(sql_templates_to_run) + + # FIM Config Step 4 - This is where we actually create the publish inundation tables to send to the EGIS service, and in this case we look to + # see if a 
product-specific sql file exists (for special cases like RnR, CatFIM, etc.), and if not, we use a template file. elif step == 'fim_config': if not event['args']['fim_config'].get('postprocess'): return + db_type = "viz" sql_file = event['args']['fim_config']['postprocess']['sql_file'] + if os.path.exists(os.path.join(folder, sql_file)): #if there is product-specific fim_configs sql file, use it. + sql_files_to_run.append({"sql_file":sql_file, "db_type":db_type}) + else: # if not, use the fim_publish_template + folder = 'fim_caching_templates' + sql_file = '4a_rds_create_fim_publish_table' + sql_replace.update({"{domain}":domain}) + sql_files_to_run.append({"sql_file":sql_file, "db_type":db_type}) + + ########################################################## + # Product elif step == "products": + db_type = "viz" folder = os.path.join(folder, event['args']['product']['configuration']) sql_file = event['args']['postprocess_sql']['sql_file'] + sql_files_to_run.append({"sql_file":sql_file, "db_type":db_type}) # Summary elif step == 'summaries': + db_type = "viz" folder = os.path.join(folder, event['args']['product']['product']) - sql_file = event['args']['postprocess_summary']['sql_file'] - - ### Run the Appropriate SQL File ### - sql_path = f"{folder}/{sql_file}.sql" - - # Checks if all tables references in sql file exist and are updated (if applicable) - # Raises a custom RequiredTableNotUpdated if not, which will be caught by viz_pipline - # and invoke a retry - database(db_type="viz").check_required_tables_updated(sql_path, sql_replace, reference_time, raise_if_false=True) + sql_file = event['args']['postprocess_summary']['sql_file'] + sql_files_to_run.append({"sql_file":sql_file, "db_type":db_type}) + + ############################################################ Run the SQL ########################################################## + # Iterate through the sql commands defined in the logic above + for sql_file_to_run in sql_files_to_run: + sql_file = 
sql_file_to_run['sql_file'] + db_type = sql_file_to_run['db_type'] + if 'check_dependencies' in sql_file_to_run: # This allows one to set a specific step to not check db dependences, which we currently want to avoid on Redshift and Hand Preprocessing steps (since tables are truncated prior) + check_dependencies = sql_file_to_run['check_dependencies'] + + ### Get the Appropriate SQL File ### + sql_path = f"{folder}/{sql_file}.sql" + + if db_type == "viz" and check_dependencies is True: + # Checks if all tables references in sql file exist and are updated (if applicable) + # Raises a custom RequiredTableNotUpdated if not, which will be caught by viz_pipline + # and invoke a retry + # TODO: I do not currently have this setup for Redshift, need to think that through. + database(db_type=db_type).check_required_tables_updated(sql_path, sql_replace, reference_time, raise_if_false=True) - run_sql(sql_path, sql_replace) + run_sql(sql_path, sql_replace, db_type=db_type) return True -# Special function to handle admin-only sql tasks +############################################################################################################################################ +# Special function to handle admin-only sql tasks - this is used for preparing for and finishing ingest tasks def run_admin_tasks(event, folder, step, sql_replace, reference_time): past_event = True if len(sql_replace) > 1 else False target_table = event['args']['db_ingest_group']['target_table'] @@ -84,8 +143,9 @@ def run_admin_tasks(event, folder, step, sql_replace, reference_time): if feature_id_column_exists[0]: run_sql('admin/remove_oconus_features.sql', sql_replace) -# Run sql from string or file, and replace any items basd on the sql_replace dictionary. -def run_sql(sql_path_or_str, sql_replace=None): +############################################################################################################################################ +# This function runs SQL on a database. 
It also can accept a sql_replace dictionary, where it does a find and replace on the SQL text before execution. +def run_sql(sql_path_or_str, sql_replace=None, db_type="viz"): result = None if not sql_replace: sql_replace = {} @@ -93,8 +153,10 @@ def run_sql(sql_path_or_str, sql_replace=None): # Determine if arg is file or raw SQL string if os.path.exists(sql_path_or_str): sql = open(sql_path_or_str, 'r').read() + print(f" Executing {sql_path_or_str}") else: sql = sql_path_or_str + print(f" Executing custom sql") # replace portions of SQL with any items in the dictionary (at least has reference_time) # sort the replace dictionary to have longer values upfront first @@ -102,8 +164,8 @@ def run_sql(sql_path_or_str, sql_replace=None): for word, replacement in sql_replace: sql = re.sub(re.escape(word), replacement, sql, flags=re.IGNORECASE).replace('utc', 'UTC') - viz_db = database(db_type="viz") - with viz_db.get_db_connection() as connection: + db = database(db_type=db_type) + with db.get_db_connection() as connection: cur = connection.cursor() cur.execute(sql) try: @@ -111,5 +173,6 @@ def run_sql(sql_path_or_str, sql_replace=None): except: pass connection.commit() - print(f"Finished executing the SQL statement above.") + connection.close() + print(f"---> Finished.") return result diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/ana_14day_max_flows.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/ana_14day_max_flows.sql index ea757c38..38aa7aa1 100644 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/ana_14day_max_flows.sql +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/ana_14day_max_flows.sql @@ -1,9 +1,18 @@ -DROP TABLE IF EXISTS cache.max_flows_ana_14day; +CREATE TABLE IF NOT EXISTS cache.max_flows_ana_14day +( + feature_id bigint, + reference_time text, + nwm_vers double precision, + discharge_cms numeric, + discharge_cfs numeric +); -SELECT max_14day_forecast.feature_id, - 
max_14day_forecast.reference_time, - max_14day_forecast.nwm_vers, - ROUND(max_14day_forecast.streamflow::numeric, 2) AS max_flow_14day_cms, - ROUND((max_14day_forecast.streamflow * 35.315)::numeric, 2) AS max_flow_14day_cfs -INTO cache.max_flows_ana_14day -FROM ingest.nwm_channel_rt_ana_14day_max AS max_14day_forecast \ No newline at end of file +TRUNCATE TABLE cache.max_flows_ana_14day; + +INSERT INTO cache.max_flows_ana_14day(feature_id, reference_time, nwm_vers, discharge_cms, discharge_cfs) + SELECT max_14day_forecast.feature_id, + max_14day_forecast.reference_time, + max_14day_forecast.nwm_vers, + ROUND(max_14day_forecast.streamflow::numeric, 2) AS discharge_cms, + ROUND((max_14day_forecast.streamflow * 35.315)::numeric, 2) AS discharge_cfs + FROM ingest.nwm_channel_rt_ana_14day_max AS max_14day_forecast; \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/ana_7day_max_flows.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/ana_7day_max_flows.sql index f61ddfba..6216990a 100644 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/ana_7day_max_flows.sql +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/ana_7day_max_flows.sql @@ -1,9 +1,18 @@ -DROP TABLE IF EXISTS cache.max_flows_ana_7day; +CREATE TABLE IF NOT EXISTS cache.max_flows_ana_7day +( + feature_id bigint, + reference_time text, + nwm_vers double precision, + discharge_cms numeric, + discharge_cfs numeric +); -SELECT max_7day_forecast.feature_id, - max_7day_forecast.reference_time, - max_7day_forecast.nwm_vers, - ROUND(max_7day_forecast.streamflow::numeric, 2) AS max_flow_7day_cms, - ROUND((max_7day_forecast.streamflow * 35.315)::numeric, 2) AS max_flow_7day_cfs -INTO cache.max_flows_ana_7day -FROM ingest.nwm_channel_rt_ana_7day_max AS max_7day_forecast; \ No newline at end of file +TRUNCATE TABLE cache.max_flows_ana_7day; + +INSERT INTO cache.max_flows_ana_7day(feature_id, reference_time, nwm_vers, discharge_cms, 
discharge_cfs) + SELECT max_7day_forecast.feature_id, + max_7day_forecast.reference_time, + max_7day_forecast.nwm_vers, + ROUND(max_7day_forecast.streamflow::numeric, 2) AS discharge_cms, + ROUND((max_7day_forecast.streamflow * 35.315)::numeric, 2) AS discharge_cfs + FROM ingest.nwm_channel_rt_ana_7day_max AS max_7day_forecast; \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/ana_max_flows.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/ana_max_flows.sql index 97aeea5f..6d6ff65b 100644 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/ana_max_flows.sql +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/ana_max_flows.sql @@ -1,18 +1,17 @@ -- Create a past_hour ana table. This is an interim solution to Shawn's rate of change service. -CREATE TABLE IF NOT EXISTS cache.max_flows_ana (feature_id bigint, reference_time timestamp without time zone, nwm_vers double precision, maxflow_1hour_cms double precision, maxflow_1hour_cfs double precision); -CREATE TABLE IF NOT EXISTS cache.max_flows_ana_past_hour (feature_id bigint, reference_time timestamp without time zone, nwm_vers double precision, maxflow_1hour_cms double precision, maxflow_1hour_cfs double precision); +CREATE TABLE IF NOT EXISTS cache.max_flows_ana (feature_id bigint, reference_time text, nwm_vers double precision, discharge_cms double precision, discharge_cfs double precision); +CREATE TABLE IF NOT EXISTS cache.max_flows_ana_past_hour (feature_id bigint, reference_time text, nwm_vers double precision, discharge_cms double precision, discharge_cfs double precision); TRUNCATE TABLE cache.max_flows_ana_past_hour; INSERT INTO cache.max_flows_ana_past_hour SELECT * FROM cache.max_flows_ana; -- Regular ana max flows -DROP TABLE IF EXISTS cache.max_flows_ana; - -SELECT forecasts.feature_id, - forecasts.reference_time, - forecasts.nwm_vers, - ROUND(MAX(forecasts.streamflow)::numeric, 2) AS maxflow_1hour_cms, - 
ROUND((MAX(forecasts.streamflow) * 35.315)::numeric, 2) AS maxflow_1hour_cfs -INTO cache.max_flows_ana -FROM ingest.nwm_channel_rt_ana forecasts -GROUP BY forecasts.feature_id, forecasts.reference_time, forecasts.nwm_vers; \ No newline at end of file +TRUNCATE TABLE cache.max_flows_ana; +INSERT INTO cache.max_flows_ana(feature_id, reference_time, nwm_vers, discharge_cms, discharge_cfs) + SELECT forecasts.feature_id, + forecasts.reference_time, + forecasts.nwm_vers, + ROUND(MAX(forecasts.streamflow)::numeric, 2) AS discharge_cms, + ROUND((MAX(forecasts.streamflow) * 35.315)::numeric, 2) AS discharge_cfs + FROM ingest.nwm_channel_rt_ana forecasts + GROUP BY forecasts.feature_id, forecasts.reference_time, forecasts.nwm_vers; \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/ana_max_flows_ak.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/ana_max_flows_ak.sql index 219564dd..b717ebe5 100644 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/ana_max_flows_ak.sql +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/ana_max_flows_ak.sql @@ -1,11 +1,19 @@ -DROP TABLE IF EXISTS cache.max_flows_ana_ak; +CREATE TABLE IF NOT EXISTS cache.max_flows_ana_ak +( + feature_id bigint, + reference_time text, + nwm_vers double precision, + discharge_cms numeric, + discharge_cfs numeric +); +TRUNCATE TABLE cache.max_flows_ana_ak; -SELECT forecasts.feature_id, - forecasts.reference_time, - forecasts.nwm_vers, - ROUND(MAX(forecasts.streamflow)::numeric, 2) AS maxflow_1hour_cms, - ROUND((MAX(forecasts.streamflow) * 35.315)::numeric, 2) AS maxflow_1hour_cfs -INTO cache.max_flows_ana_ak -FROM ingest.nwm_channel_rt_ana_ak forecasts -GROUP BY forecasts.feature_id, forecasts.reference_time, forecasts.nwm_vers; \ No newline at end of file +INSERT INTO cache.max_flows_ana_ak(feature_id, reference_time, nwm_vers, discharge_cms, discharge_cfs) + SELECT forecasts.feature_id, + forecasts.reference_time, + 
forecasts.nwm_vers, + ROUND(MAX(forecasts.streamflow)::numeric, 2) AS discharge_cms, + ROUND((MAX(forecasts.streamflow) * 35.315)::numeric, 2) AS discharge_cfs + FROM ingest.nwm_channel_rt_ana_ak forecasts + GROUP BY forecasts.feature_id, forecasts.reference_time, forecasts.nwm_vers; \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/ana_max_flows_hi.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/ana_max_flows_hi.sql index 88b32946..0a7dd2f0 100644 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/ana_max_flows_hi.sql +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/ana_max_flows_hi.sql @@ -1,11 +1,19 @@ -DROP TABLE IF EXISTS cache.max_flows_ana_hi; +CREATE TABLE IF NOT EXISTS cache.max_flows_ana_hi +( + feature_id bigint, + reference_time text, + nwm_vers double precision, + discharge_cms numeric, + discharge_cfs numeric +); +TRUNCATE TABLE cache.max_flows_ana_hi; -SELECT forecasts.feature_id, - forecasts.reference_time, - forecasts.nwm_vers, - ROUND(MAX(forecasts.streamflow)::numeric, 2) AS maxflow_1hour_cms, - ROUND((MAX(forecasts.streamflow) * 35.315)::numeric, 2) AS maxflow_1hour_cfs -INTO cache.max_flows_ana_hi -FROM ingest.nwm_channel_rt_ana_hi forecasts -GROUP BY forecasts.feature_id, forecasts.reference_time, forecasts.nwm_vers; \ No newline at end of file +INSERT INTO cache.max_flows_ana_hi(feature_id, reference_time, nwm_vers, discharge_cms, discharge_cfs) + SELECT forecasts.feature_id, + forecasts.reference_time, + forecasts.nwm_vers, + ROUND(MAX(forecasts.streamflow)::numeric, 2) AS discharge_cms, + ROUND((MAX(forecasts.streamflow) * 35.315)::numeric, 2) AS discharge_cfs + FROM ingest.nwm_channel_rt_ana_hi forecasts + GROUP BY forecasts.feature_id, forecasts.reference_time, forecasts.nwm_vers; \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/ana_max_flows_prvi.sql 
b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/ana_max_flows_prvi.sql index ba49c26e..6ce9d59b 100644 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/ana_max_flows_prvi.sql +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/ana_max_flows_prvi.sql @@ -1,11 +1,19 @@ -DROP TABLE IF EXISTS cache.max_flows_ana_prvi; +CREATE TABLE IF NOT EXISTS cache.max_flows_ana_prvi +( + feature_id bigint, + reference_time text, + nwm_vers double precision, + discharge_cms numeric, + discharge_cfs numeric +); +TRUNCATE TABLE cache.max_flows_ana_prvi; -SELECT forecasts.feature_id, - forecasts.reference_time, - forecasts.nwm_vers, - ROUND(MAX(forecasts.streamflow)::numeric, 2) AS maxflow_1hour_cms, - ROUND((MAX(forecasts.streamflow) * 35.315)::numeric, 2) AS maxflow_1hour_cfs -INTO cache.max_flows_ana_prvi -FROM ingest.nwm_channel_rt_ana_prvi forecasts -GROUP BY forecasts.feature_id, forecasts.reference_time, forecasts.nwm_vers; \ No newline at end of file +INSERT INTO cache.max_flows_ana_prvi(feature_id, reference_time, nwm_vers, discharge_cms, discharge_cfs) + SELECT forecasts.feature_id, + forecasts.reference_time, + forecasts.nwm_vers, + ROUND(MAX(forecasts.streamflow)::numeric, 2) AS discharge_cms, + ROUND((MAX(forecasts.streamflow) * 35.315)::numeric, 2) AS discharge_cfs + FROM ingest.nwm_channel_rt_ana_prvi forecasts + GROUP BY forecasts.feature_id, forecasts.reference_time, forecasts.nwm_vers; \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_gfs_10day_max_flows.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_gfs_10day_max_flows.sql new file mode 100644 index 00000000..ef0169da --- /dev/null +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_gfs_10day_max_flows.sql @@ -0,0 +1,19 @@ +CREATE TABLE IF NOT EXISTS cache.max_flows_mrf_gfs_10day +( + feature_id bigint, + reference_time text, + nwm_vers double precision, + discharge_cms numeric, + 
discharge_cfs numeric +); + +TRUNCATE TABLE cache.max_flows_mrf_gfs_10day; +INSERT INTO cache.max_flows_mrf_gfs_10day(feature_id, reference_time, nwm_vers, discharge_cms, discharge_cfs) + SELECT forecasts.feature_id, + forecasts.reference_time, + forecasts.nwm_vers, + round(max(forecasts.streamflow)::numeric, 2) AS discharge_cms, + round(max(forecasts.streamflow * 35.315)::numeric, 2) AS discharge_cfs + FROM ingest.nwm_channel_rt_mrf_gfs_mem1 forecasts + WHERE forecasts.forecast_hour > 120 + GROUP BY forecasts.feature_id, forecasts.reference_time, forecasts.nwm_vers; \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_gfs_10day_max_flows_ak.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_gfs_10day_max_flows_ak.sql new file mode 100644 index 00000000..eb10a38c --- /dev/null +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_gfs_10day_max_flows_ak.sql @@ -0,0 +1,19 @@ +CREATE TABLE IF NOT EXISTS cache.max_flows_mrf_gfs_10day_ak +( + feature_id bigint, + reference_time text, + nwm_vers double precision, + discharge_cms numeric, + discharge_cfs numeric +); + +TRUNCATE TABLE cache.max_flows_mrf_gfs_10day_ak; +INSERT INTO cache.max_flows_mrf_gfs_10day_ak(feature_id, reference_time, nwm_vers, discharge_cms, discharge_cfs) + SELECT forecasts.feature_id, + forecasts.reference_time, + forecasts.nwm_vers, + round(max(forecasts.streamflow)::numeric, 2) AS discharge_cms, + round(max(forecasts.streamflow * 35.315)::numeric, 2) AS discharge_cfs + FROM ingest.nwm_channel_rt_mrf_gfs_ak_mem1 forecasts + WHERE forecasts.forecast_hour > 120 + GROUP BY forecasts.feature_id, forecasts.reference_time, forecasts.nwm_vers; \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_gfs_3day_max_flows.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_gfs_3day_max_flows.sql new file mode 100644 index 00000000..19649d1a --- /dev/null +++ 
b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_gfs_3day_max_flows.sql @@ -0,0 +1,19 @@ +CREATE TABLE IF NOT EXISTS cache.max_flows_mrf_gfs_3day +( + feature_id bigint, + reference_time text, + nwm_vers double precision, + discharge_cms numeric, + discharge_cfs numeric +); + +TRUNCATE TABLE cache.max_flows_mrf_gfs_3day; +INSERT INTO cache.max_flows_mrf_gfs_3day(feature_id, reference_time, nwm_vers, discharge_cms, discharge_cfs) + SELECT forecasts.feature_id, + forecasts.reference_time, + forecasts.nwm_vers, + round(max(forecasts.streamflow)::numeric, 2) AS discharge_cms, + round(max(forecasts.streamflow * 35.315)::numeric, 2) AS discharge_cfs + FROM ingest.nwm_channel_rt_mrf_gfs_mem1 forecasts + WHERE forecasts.forecast_hour <= 72 + GROUP BY forecasts.feature_id, forecasts.reference_time, forecasts.nwm_vers; diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_gfs_3day_max_flows_ak.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_gfs_3day_max_flows_ak.sql new file mode 100644 index 00000000..3a71eb3b --- /dev/null +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_gfs_3day_max_flows_ak.sql @@ -0,0 +1,19 @@ +CREATE TABLE IF NOT EXISTS cache.max_flows_mrf_gfs_3day_ak +( + feature_id bigint, + reference_time text, + nwm_vers double precision, + discharge_cms numeric, + discharge_cfs numeric +); + +TRUNCATE TABLE cache.max_flows_mrf_gfs_3day_ak; +INSERT INTO cache.max_flows_mrf_gfs_3day_ak(feature_id, reference_time, nwm_vers, discharge_cms, discharge_cfs) + SELECT forecasts.feature_id, + forecasts.reference_time, + forecasts.nwm_vers, + round(max(forecasts.streamflow)::numeric, 2) AS discharge_cms, + round(max(forecasts.streamflow * 35.315)::numeric, 2) AS discharge_cfs + FROM ingest.nwm_channel_rt_mrf_gfs_ak_mem1 forecasts + WHERE forecasts.forecast_hour <= 72 + GROUP BY forecasts.feature_id, forecasts.reference_time, forecasts.nwm_vers; \ No newline at end of file diff --git 
a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_gfs_5day_max_flows.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_gfs_5day_max_flows.sql new file mode 100644 index 00000000..66ad335e --- /dev/null +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_gfs_5day_max_flows.sql @@ -0,0 +1,19 @@ +CREATE TABLE IF NOT EXISTS cache.max_flows_mrf_gfs_5day +( + feature_id bigint, + reference_time text, + nwm_vers double precision, + discharge_cms numeric, + discharge_cfs numeric +); + +TRUNCATE TABLE cache.max_flows_mrf_gfs_5day; +INSERT INTO cache.max_flows_mrf_gfs_5day(feature_id, reference_time, nwm_vers, discharge_cms, discharge_cfs) + SELECT forecasts.feature_id, + forecasts.reference_time, + forecasts.nwm_vers, + round(max(forecasts.streamflow)::numeric, 2) AS discharge_cms, + round(max(forecasts.streamflow * 35.315)::numeric, 2) AS discharge_cfs + FROM ingest.nwm_channel_rt_mrf_gfs_mem1 forecasts + WHERE forecasts.forecast_hour > 72 AND forecasts.forecast_hour <= 120 + GROUP BY forecasts.feature_id, forecasts.reference_time, forecasts.nwm_vers; \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_gfs_5day_max_flows_ak.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_gfs_5day_max_flows_ak.sql new file mode 100644 index 00000000..22dd54cc --- /dev/null +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_gfs_5day_max_flows_ak.sql @@ -0,0 +1,19 @@ +CREATE TABLE IF NOT EXISTS cache.max_flows_mrf_gfs_5day_ak +( + feature_id bigint, + reference_time text, + nwm_vers double precision, + discharge_cms numeric, + discharge_cfs numeric +); + +TRUNCATE TABLE cache.max_flows_mrf_gfs_5day_ak; +INSERT INTO cache.max_flows_mrf_gfs_5day_ak(feature_id, reference_time, nwm_vers, discharge_cms, discharge_cfs) + SELECT forecasts.feature_id, + forecasts.reference_time, + forecasts.nwm_vers, + round(max(forecasts.streamflow)::numeric, 2) AS 
discharge_cms, + round(max(forecasts.streamflow * 35.315)::numeric, 2) AS discharge_cfs + FROM ingest.nwm_channel_rt_mrf_gfs_ak_mem1 forecasts + WHERE forecasts.forecast_hour > 72 AND forecasts.forecast_hour <= 120 + GROUP BY forecasts.feature_id, forecasts.reference_time, forecasts.nwm_vers; \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_gfs_max_flows.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_gfs_max_flows.sql deleted file mode 100644 index 8307cd5e..00000000 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_gfs_max_flows.sql +++ /dev/null @@ -1,14 +0,0 @@ -DROP TABLE IF EXISTS cache.max_flows_mrf_gfs; - -SELECT forecasts.feature_id, - forecasts.reference_time, - forecasts.nwm_vers, - round(max(CASE WHEN forecasts.forecast_hour <= 72 THEN forecasts.streamflow ELSE NULL END)::numeric, 2) AS maxflow_3day_cms, - round(max(CASE WHEN forecasts.forecast_hour <= 120 THEN forecasts.streamflow ELSE NULL END)::numeric, 2) AS maxflow_5day_cms, - round(max(forecasts.streamflow)::numeric, 2) AS maxflow_10day_cms, - round((max(CASE WHEN forecasts.forecast_hour <= 72 THEN forecasts.streamflow ELSE NULL END) * 35.315)::numeric, 2) AS maxflow_3day_cfs, - round((max(CASE WHEN forecasts.forecast_hour <= 120 THEN forecasts.streamflow ELSE NULL END) * 35.315)::numeric, 2) AS maxflow_5day_cfs, - round((max(forecasts.streamflow) * 35.315)::numeric, 2) AS maxflow_10day_cfs -INTO cache.max_flows_mrf_gfs -FROM ingest.nwm_channel_rt_mrf_gfs_mem1 forecasts -GROUP BY forecasts.feature_id, forecasts.reference_time, forecasts.nwm_vers; \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_gfs_max_flows_ak.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_gfs_max_flows_ak.sql deleted file mode 100644 index ae7ed3c9..00000000 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_gfs_max_flows_ak.sql +++ /dev/null 
@@ -1,14 +0,0 @@ -DROP TABLE IF EXISTS cache.mrf_gfs_max_flows_ak; - -SELECT forecasts.feature_id, - forecasts.reference_time, - forecasts.nwm_vers, - round(max(CASE WHEN forecasts.forecast_hour <= 72 THEN forecasts.streamflow ELSE NULL END)::numeric, 2) AS maxflow_3day_cms, - round(max(CASE WHEN forecasts.forecast_hour <= 120 THEN forecasts.streamflow ELSE NULL END)::numeric, 2) AS maxflow_5day_cms, - round(max(forecasts.streamflow)::numeric, 2) AS maxflow_10day_cms, - round((max(CASE WHEN forecasts.forecast_hour <= 72 THEN forecasts.streamflow ELSE NULL END) * 35.315)::numeric, 2) AS maxflow_3day_cfs, - round((max(CASE WHEN forecasts.forecast_hour <= 120 THEN forecasts.streamflow ELSE NULL END) * 35.315)::numeric, 2) AS maxflow_5day_cfs, - round((max(forecasts.streamflow) * 35.315)::numeric, 2) AS maxflow_10day_cfs -INTO cache.mrf_gfs_max_flows_ak -FROM ingest.nwm_channel_rt_mrf_gfs_ak_mem1 forecasts -GROUP BY forecasts.feature_id, forecasts.reference_time, forecasts.nwm_vers; \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_gfs_mem2_5day_max_flows.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_gfs_mem2_5day_max_flows.sql new file mode 100644 index 00000000..f44589e4 --- /dev/null +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_gfs_mem2_5day_max_flows.sql @@ -0,0 +1,19 @@ +CREATE TABLE IF NOT EXISTS cache.max_flows_mrf_gfs_mem2_5day +( + feature_id bigint, + reference_time text, + nwm_vers double precision, + discharge_cms numeric, + discharge_cfs numeric +); + +TRUNCATE TABLE cache.max_flows_mrf_gfs_mem2_5day; +INSERT INTO cache.max_flows_mrf_gfs_mem2_5day(feature_id, reference_time, nwm_vers, discharge_cms, discharge_cfs) + SELECT forecasts.feature_id, + forecasts.reference_time, + forecasts.nwm_vers, + round(max(forecasts.streamflow)::numeric, 2) AS discharge_cms, + round(max(forecasts.streamflow * 35.315)::numeric, 2) AS discharge_cfs + FROM 
ingest.nwm_channel_rt_mrf_gfs_mem2 forecasts + WHERE forecasts.forecast_hour > 72 AND forecasts.forecast_hour <= 120 + GROUP BY forecasts.feature_id, forecasts.reference_time, forecasts.nwm_vers; \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_gfs_mem3_5day_max_flows.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_gfs_mem3_5day_max_flows.sql new file mode 100644 index 00000000..1582051c --- /dev/null +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_gfs_mem3_5day_max_flows.sql @@ -0,0 +1,19 @@ +CREATE TABLE IF NOT EXISTS cache.max_flows_mrf_gfs_mem3_5day +( + feature_id bigint, + reference_time text, + nwm_vers double precision, + discharge_cms numeric, + discharge_cfs numeric +); + +TRUNCATE TABLE cache.max_flows_mrf_gfs_mem3_5day; +INSERT INTO cache.max_flows_mrf_gfs_mem3_5day(feature_id, reference_time, nwm_vers, discharge_cms, discharge_cfs) + SELECT forecasts.feature_id, + forecasts.reference_time, + forecasts.nwm_vers, + round(max(forecasts.streamflow)::numeric, 2) AS discharge_cms, + round(max(forecasts.streamflow * 35.315)::numeric, 2) AS discharge_cfs + FROM ingest.nwm_channel_rt_mrf_gfs_mem3 forecasts + WHERE forecasts.forecast_hour > 72 AND forecasts.forecast_hour <= 120 + GROUP BY forecasts.feature_id, forecasts.reference_time, forecasts.nwm_vers; \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_gfs_mem4_5day_max_flows.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_gfs_mem4_5day_max_flows.sql new file mode 100644 index 00000000..b4ffbeac --- /dev/null +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_gfs_mem4_5day_max_flows.sql @@ -0,0 +1,19 @@ +CREATE TABLE IF NOT EXISTS cache.max_flows_mrf_gfs_mem4_5day +( + feature_id bigint, + reference_time text, + nwm_vers double precision, + discharge_cms numeric, + discharge_cfs numeric +); + +TRUNCATE TABLE 
cache.max_flows_mrf_gfs_mem4_5day; +INSERT INTO cache.max_flows_mrf_gfs_mem4_5day(feature_id, reference_time, nwm_vers, discharge_cms, discharge_cfs) + SELECT forecasts.feature_id, + forecasts.reference_time, + forecasts.nwm_vers, + round(max(forecasts.streamflow)::numeric, 2) AS discharge_cms, + round(max(forecasts.streamflow * 35.315)::numeric, 2) AS discharge_cfs + FROM ingest.nwm_channel_rt_mrf_gfs_mem4 forecasts + WHERE forecasts.forecast_hour > 72 AND forecasts.forecast_hour <= 120 + GROUP BY forecasts.feature_id, forecasts.reference_time, forecasts.nwm_vers; \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_gfs_mem5_5day_max_flows.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_gfs_mem5_5day_max_flows.sql new file mode 100644 index 00000000..cc008a12 --- /dev/null +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_gfs_mem5_5day_max_flows.sql @@ -0,0 +1,19 @@ +CREATE TABLE IF NOT EXISTS cache.max_flows_mrf_gfs_mem5_5day +( + feature_id bigint, + reference_time text, + nwm_vers double precision, + discharge_cms numeric, + discharge_cfs numeric +); + +TRUNCATE TABLE cache.max_flows_mrf_gfs_mem5_5day; +INSERT INTO cache.max_flows_mrf_gfs_mem5_5day(feature_id, reference_time, nwm_vers, discharge_cms, discharge_cfs) + SELECT forecasts.feature_id, + forecasts.reference_time, + forecasts.nwm_vers, + round(max(forecasts.streamflow)::numeric, 2) AS discharge_cms, + round(max(forecasts.streamflow * 35.315)::numeric, 2) AS discharge_cfs + FROM ingest.nwm_channel_rt_mrf_gfs_mem5 forecasts + WHERE forecasts.forecast_hour > 72 AND forecasts.forecast_hour <= 120 + GROUP BY forecasts.feature_id, forecasts.reference_time, forecasts.nwm_vers; \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_gfs_mem6_5day_max_flows.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_gfs_mem6_5day_max_flows.sql new file mode 100644 
index 00000000..6ef8ea1a --- /dev/null +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_gfs_mem6_5day_max_flows.sql @@ -0,0 +1,19 @@ +CREATE TABLE IF NOT EXISTS cache.max_flows_mrf_gfs_mem6_5day +( + feature_id bigint, + reference_time text, + nwm_vers double precision, + discharge_cms numeric, + discharge_cfs numeric +); + +TRUNCATE TABLE cache.max_flows_mrf_gfs_mem6_5day; +INSERT INTO cache.max_flows_mrf_gfs_mem6_5day(feature_id, reference_time, nwm_vers, discharge_cms, discharge_cfs) + SELECT forecasts.feature_id, + forecasts.reference_time, + forecasts.nwm_vers, + round(max(forecasts.streamflow)::numeric, 2) AS discharge_cms, + round(max(forecasts.streamflow * 35.315)::numeric, 2) AS discharge_cfs + FROM ingest.nwm_channel_rt_mrf_gfs_mem6 forecasts + WHERE forecasts.forecast_hour > 72 AND forecasts.forecast_hour <= 120 + GROUP BY forecasts.feature_id, forecasts.reference_time, forecasts.nwm_vers; \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_nbm_10day_max_flows.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_nbm_10day_max_flows.sql new file mode 100644 index 00000000..69c68e95 --- /dev/null +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_nbm_10day_max_flows.sql @@ -0,0 +1,19 @@ +CREATE TABLE IF NOT EXISTS cache.max_flows_mrf_nbm_10day +( + feature_id bigint, + reference_time text, + nwm_vers double precision, + discharge_cms numeric, + discharge_cfs numeric +); + +TRUNCATE TABLE cache.max_flows_mrf_nbm_10day; +INSERT INTO cache.max_flows_mrf_nbm_10day(feature_id, reference_time, nwm_vers, discharge_cms, discharge_cfs) + SELECT forecasts.feature_id, + forecasts.reference_time, + forecasts.nwm_vers, + round(max(forecasts.streamflow)::numeric, 2) AS discharge_cms, + round(max(forecasts.streamflow * 35.315)::numeric, 2) AS discharge_cfs + FROM ingest.nwm_channel_rt_mrf_nbm forecasts + WHERE forecasts.forecast_hour > 120 + GROUP BY 
forecasts.feature_id, forecasts.reference_time, forecasts.nwm_vers; \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_nbm_10day_max_flows_ak.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_nbm_10day_max_flows_ak.sql new file mode 100644 index 00000000..2ad6ff59 --- /dev/null +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_nbm_10day_max_flows_ak.sql @@ -0,0 +1,19 @@ +CREATE TABLE IF NOT EXISTS cache.max_flows_mrf_nbm_10day_ak +( + feature_id bigint, + reference_time text, + nwm_vers double precision, + discharge_cms numeric, + discharge_cfs numeric +); + +TRUNCATE TABLE cache.max_flows_mrf_nbm_10day_ak; +INSERT INTO cache.max_flows_mrf_nbm_10day_ak(feature_id, reference_time, nwm_vers, discharge_cms, discharge_cfs) + SELECT forecasts.feature_id, + forecasts.reference_time, + forecasts.nwm_vers, + round(max(forecasts.streamflow)::numeric, 2) AS discharge_cms, + round(max(forecasts.streamflow * 35.315)::numeric, 2) AS discharge_cfs + FROM ingest.nwm_channel_rt_mrf_nbm_ak forecasts + WHERE forecasts.forecast_hour > 120 + GROUP BY forecasts.feature_id, forecasts.reference_time, forecasts.nwm_vers; \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_nbm_3day_max_flows.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_nbm_3day_max_flows.sql new file mode 100644 index 00000000..24bd9e7b --- /dev/null +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_nbm_3day_max_flows.sql @@ -0,0 +1,19 @@ +CREATE TABLE IF NOT EXISTS cache.max_flows_mrf_nbm_3day +( + feature_id bigint, + reference_time text, + nwm_vers double precision, + discharge_cms numeric, + discharge_cfs numeric +); + +TRUNCATE TABLE cache.max_flows_mrf_nbm_3day; +INSERT INTO cache.max_flows_mrf_nbm_3day(feature_id, reference_time, nwm_vers, discharge_cms, discharge_cfs) + SELECT forecasts.feature_id, + forecasts.reference_time, + 
forecasts.nwm_vers, + round(max(forecasts.streamflow)::numeric, 2) AS discharge_cms, + round(max(forecasts.streamflow * 35.315)::numeric, 2) AS discharge_cfs + FROM ingest.nwm_channel_rt_mrf_nbm forecasts + WHERE forecasts.forecast_hour <= 72 + GROUP BY forecasts.feature_id, forecasts.reference_time, forecasts.nwm_vers; \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_nbm_3day_max_flows_ak.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_nbm_3day_max_flows_ak.sql new file mode 100644 index 00000000..941058e0 --- /dev/null +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_nbm_3day_max_flows_ak.sql @@ -0,0 +1,19 @@ +CREATE TABLE IF NOT EXISTS cache.max_flows_mrf_nbm_3day_ak +( + feature_id bigint, + reference_time text, + nwm_vers double precision, + discharge_cms numeric, + discharge_cfs numeric +); + +TRUNCATE TABLE cache.max_flows_mrf_nbm_3day_ak; +INSERT INTO cache.max_flows_mrf_nbm_3day_ak(feature_id, reference_time, nwm_vers, discharge_cms, discharge_cfs) + SELECT forecasts.feature_id, + forecasts.reference_time, + forecasts.nwm_vers, + round(max(forecasts.streamflow)::numeric, 2) AS discharge_cms, + round(max(forecasts.streamflow * 35.315)::numeric, 2) AS discharge_cfs + FROM ingest.nwm_channel_rt_mrf_nbm_ak forecasts + WHERE forecasts.forecast_hour <= 72 + GROUP BY forecasts.feature_id, forecasts.reference_time, forecasts.nwm_vers; diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_nbm_5day_max_flows.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_nbm_5day_max_flows.sql new file mode 100644 index 00000000..03049435 --- /dev/null +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_nbm_5day_max_flows.sql @@ -0,0 +1,19 @@ +CREATE TABLE IF NOT EXISTS cache.max_flows_mrf_nbm_5day +( + feature_id bigint, + reference_time text, + nwm_vers double precision, + discharge_cms numeric, + discharge_cfs numeric +); + 
+TRUNCATE TABLE cache.max_flows_mrf_nbm_5day; +INSERT INTO cache.max_flows_mrf_nbm_5day(feature_id, reference_time, nwm_vers, discharge_cms, discharge_cfs) + SELECT forecasts.feature_id, + forecasts.reference_time, + forecasts.nwm_vers, + round(max(forecasts.streamflow)::numeric, 2) AS discharge_cms, + round(max(forecasts.streamflow * 35.315)::numeric, 2) AS discharge_cfs + FROM ingest.nwm_channel_rt_mrf_nbm forecasts + WHERE forecasts.forecast_hour > 72 AND forecasts.forecast_hour <= 120 + GROUP BY forecasts.feature_id, forecasts.reference_time, forecasts.nwm_vers; \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_nbm_5day_max_flows_ak.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_nbm_5day_max_flows_ak.sql new file mode 100644 index 00000000..5975bc1c --- /dev/null +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_nbm_5day_max_flows_ak.sql @@ -0,0 +1,19 @@ +CREATE TABLE IF NOT EXISTS cache.max_flows_mrf_nbm_5day_ak +( + feature_id bigint, + reference_time text, + nwm_vers double precision, + discharge_cms numeric, + discharge_cfs numeric +); + +TRUNCATE TABLE cache.max_flows_mrf_nbm_5day_ak; +INSERT INTO cache.max_flows_mrf_nbm_5day_ak(feature_id, reference_time, nwm_vers, discharge_cms, discharge_cfs) + SELECT forecasts.feature_id, + forecasts.reference_time, + forecasts.nwm_vers, + round(max(forecasts.streamflow)::numeric, 2) AS discharge_cms, + round(max(forecasts.streamflow * 35.315)::numeric, 2) AS discharge_cfs + FROM ingest.nwm_channel_rt_mrf_nbm_ak forecasts + WHERE forecasts.forecast_hour > 72 AND forecasts.forecast_hour <= 120 + GROUP BY forecasts.feature_id, forecasts.reference_time, forecasts.nwm_vers; \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_nbm_max_flows.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_nbm_max_flows.sql deleted file mode 100644 index 4d734751..00000000 --- 
a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_nbm_max_flows.sql +++ /dev/null @@ -1,14 +0,0 @@ -DROP TABLE IF EXISTS cache.max_flows_mrf_nbm; - -SELECT forecasts.feature_id, - forecasts.reference_time, - forecasts.nwm_vers, - round(max(CASE WHEN forecasts.forecast_hour <= 72 THEN forecasts.streamflow ELSE NULL END)::numeric, 2) AS maxflow_3day_cms, - round(max(CASE WHEN forecasts.forecast_hour <= 120 THEN forecasts.streamflow ELSE NULL END)::numeric, 2) AS maxflow_5day_cms, - round(max(forecasts.streamflow)::numeric, 2) AS maxflow_10day_cms, - round((max(CASE WHEN forecasts.forecast_hour <= 72 THEN forecasts.streamflow ELSE NULL END) * 35.315)::numeric, 2) AS maxflow_3day_cfs, - round((max(CASE WHEN forecasts.forecast_hour <= 120 THEN forecasts.streamflow ELSE NULL END) * 35.315)::numeric, 2) AS maxflow_5day_cfs, - round((max(forecasts.streamflow) * 35.315)::numeric, 2) AS maxflow_10day_cfs -INTO cache.max_flows_mrf_nbm -FROM ingest.nwm_channel_rt_mrf_nbm forecasts -GROUP BY forecasts.feature_id, forecasts.reference_time, forecasts.nwm_vers; \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_nbm_max_flows_ak.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_nbm_max_flows_ak.sql deleted file mode 100644 index ed0a086f..00000000 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/mrf_nbm_max_flows_ak.sql +++ /dev/null @@ -1,14 +0,0 @@ -DROP TABLE IF EXISTS cache.mrf_nbm_max_flows_ak; - -SELECT forecasts.feature_id, - forecasts.reference_time, - forecasts.nwm_vers, - round(max(CASE WHEN forecasts.forecast_hour <= 72 THEN forecasts.streamflow ELSE NULL END)::numeric, 2) AS maxflow_3day_cms, - round(max(CASE WHEN forecasts.forecast_hour <= 120 THEN forecasts.streamflow ELSE NULL END)::numeric, 2) AS maxflow_5day_cms, - round(max(forecasts.streamflow)::numeric, 2) AS maxflow_10day_cms, - round((max(CASE WHEN forecasts.forecast_hour <= 72 THEN forecasts.streamflow ELSE 
NULL END) * 35.315)::numeric, 2) AS maxflow_3day_cfs, - round((max(CASE WHEN forecasts.forecast_hour <= 120 THEN forecasts.streamflow ELSE NULL END) * 35.315)::numeric, 2) AS maxflow_5day_cfs, - round((max(forecasts.streamflow) * 35.315)::numeric, 2) AS maxflow_10day_cfs -INTO cache.mrf_nbm_max_flows_ak -FROM ingest.nwm_channel_rt_mrf_nbm_ak forecasts -GROUP BY forecasts.feature_id, forecasts.reference_time, forecasts.nwm_vers; \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/rnr_max_flows.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/rnr_max_flows.sql index c8a69081..7bbb7bc8 100644 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/rnr_max_flows.sql +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/rnr_max_flows.sql @@ -6,8 +6,8 @@ FROM ( SELECT a.station_id as feature_id, a.reference_time, - ROUND(b.max_streamflow::numeric, 2) AS maxflow_5day_cms, - ROUND((b.max_streamflow * 35.315)::numeric, 2) AS maxflow_5day_cfs, + ROUND(b.max_streamflow::numeric, 2) AS discharge_cms, + ROUND((b.max_streamflow * 35.315)::numeric, 2) AS discharge_cfs, a.time as time_of_max FROM ingest.rnr_wrf_hydro_outputs a JOIN (SELECT station_id, MAX(streamflow) AS max_streamflow FROM ingest.rnr_wrf_hydro_outputs GROUP BY station_id) b diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/srf_max_flows.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/srf_max_flows.sql index 82d3df7a..3ff4a5b1 100644 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/srf_max_flows.sql +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/srf_max_flows.sql @@ -1,10 +1,18 @@ -DROP TABLE IF EXISTS cache.max_flows_srf; +CREATE TABLE IF NOT EXISTS cache.max_flows_srf +( + feature_id bigint, + reference_time text, + nwm_vers double precision, + discharge_cms numeric, + discharge_cfs numeric +); -SELECT forecasts.feature_id, - forecasts.reference_time, - 
forecasts.nwm_vers, - ROUND(MAX(forecasts.streamflow)::numeric, 2) AS maxflow_18hour_cms, - ROUND((MAX(forecasts.streamflow) * 35.315)::numeric, 2) AS maxflow_18hour_cfs -INTO cache.max_flows_srf -FROM ingest.nwm_channel_rt_srf forecasts -GROUP BY forecasts.feature_id, forecasts.reference_time, forecasts.nwm_vers; \ No newline at end of file +TRUNCATE TABLE cache.max_flows_srf; +INSERT INTO cache.max_flows_srf(feature_id, reference_time, nwm_vers, discharge_cms, discharge_cfs) + SELECT forecasts.feature_id, + forecasts.reference_time, + forecasts.nwm_vers, + ROUND(MAX(forecasts.streamflow)::numeric, 2) AS discharge_cms, + ROUND((MAX(forecasts.streamflow) * 35.315)::numeric, 2) AS discharge_cfs + FROM ingest.nwm_channel_rt_srf forecasts + GROUP BY forecasts.feature_id, forecasts.reference_time, forecasts.nwm_vers; \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/srf_max_flows_ak.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/srf_max_flows_ak.sql index bca0eff3..081c0bdb 100644 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/srf_max_flows_ak.sql +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/srf_max_flows_ak.sql @@ -1,10 +1,18 @@ -DROP TABLE IF EXISTS cache.max_flows_srf_ak; +CREATE TABLE IF NOT EXISTS cache.max_flows_srf_ak +( + feature_id bigint, + reference_time text, + nwm_vers double precision, + discharge_cms numeric, + discharge_cfs numeric +); -SELECT forecasts.feature_id, - forecasts.reference_time, - forecasts.nwm_vers, - ROUND(MAX(forecasts.streamflow)::numeric, 2) AS maxflow_15hour_cms, - ROUND((MAX(forecasts.streamflow) * 35.315)::numeric, 2) AS maxflow_15hour_cfs -INTO cache.max_flows_srf_ak -FROM ingest.nwm_channel_rt_srf_ak forecasts -GROUP BY forecasts.feature_id, forecasts.reference_time, forecasts.nwm_vers; \ No newline at end of file +TRUNCATE TABLE cache.max_flows_srf_ak; +INSERT INTO cache.max_flows_srf_ak(feature_id, reference_time, nwm_vers, 
discharge_cms, discharge_cfs) + SELECT forecasts.feature_id, + forecasts.reference_time, + forecasts.nwm_vers, + ROUND(MAX(forecasts.streamflow)::numeric, 2) AS discharge_cms, + ROUND((MAX(forecasts.streamflow) * 35.315)::numeric, 2) AS discharge_cfs + FROM ingest.nwm_channel_rt_srf_ak forecasts + GROUP BY forecasts.feature_id, forecasts.reference_time, forecasts.nwm_vers; \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/srf_max_flows_hi.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/srf_max_flows_hi.sql index b4cd8e08..fe180bb9 100644 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/srf_max_flows_hi.sql +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/srf_max_flows_hi.sql @@ -1,10 +1,18 @@ -DROP TABLE IF EXISTS cache.max_flows_srf_hi; +CREATE TABLE IF NOT EXISTS cache.max_flows_srf_hi +( + feature_id bigint, + reference_time text, + nwm_vers double precision, + discharge_cms numeric, + discharge_cfs numeric +); -SELECT forecasts.feature_id, - forecasts.reference_time, - forecasts.nwm_vers, - round(max(forecasts.streamflow)::numeric, 2) AS maxflow_48hour_cms, - round((max(forecasts.streamflow) * 35.315)::numeric, 2) AS maxflow_48hour_cfs -INTO cache.max_flows_srf_hi -FROM ingest.nwm_channel_rt_srf_hi forecasts -GROUP BY forecasts.feature_id, forecasts.reference_time, forecasts.nwm_vers; \ No newline at end of file +TRUNCATE TABLE cache.max_flows_srf_hi; +INSERT INTO cache.max_flows_srf_hi(feature_id, reference_time, nwm_vers, discharge_cms, discharge_cfs) + SELECT forecasts.feature_id, + forecasts.reference_time, + forecasts.nwm_vers, + round(max(forecasts.streamflow)::numeric, 2) AS discharge_cms, + round((max(forecasts.streamflow) * 35.315)::numeric, 2) AS discharge_cfs + FROM ingest.nwm_channel_rt_srf_hi forecasts + GROUP BY forecasts.feature_id, forecasts.reference_time, forecasts.nwm_vers; \ No newline at end of file diff --git 
a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/srf_max_flows_prvi.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/srf_max_flows_prvi.sql index 8217dc31..43358eae 100644 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/srf_max_flows_prvi.sql +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/max_flows/srf_max_flows_prvi.sql @@ -1,10 +1,18 @@ -DROP TABLE IF EXISTS cache.max_flows_srf_prvi; +CREATE TABLE IF NOT EXISTS cache.max_flows_srf_prvi +( + feature_id bigint, + reference_time text, + nwm_vers double precision, + discharge_cms numeric, + discharge_cfs numeric +); -SELECT forecasts.feature_id, - forecasts.reference_time, - forecasts.nwm_vers, - ROUND(MAX(forecasts.streamflow)::numeric, 2) AS maxflow_48hour_cms, - ROUND((MAX(forecasts.streamflow) * 35.315)::numeric, 2) AS maxflow_48hour_cfs -INTO cache.max_flows_srf_prvi -FROM ingest.nwm_channel_rt_srf_prvi forecasts -GROUP BY forecasts.feature_id, forecasts.reference_time, forecasts.nwm_vers; \ No newline at end of file +TRUNCATE TABLE cache.max_flows_srf_prvi; +INSERT INTO cache.max_flows_srf_prvi(feature_id, reference_time, nwm_vers, discharge_cms, discharge_cfs) + SELECT forecasts.feature_id, + forecasts.reference_time, + forecasts.nwm_vers, + ROUND(MAX(forecasts.streamflow)::numeric, 2) AS discharge_cms, + ROUND((MAX(forecasts.streamflow) * 35.315)::numeric, 2) AS discharge_cfs + FROM ingest.nwm_channel_rt_srf_prvi forecasts + GROUP BY forecasts.feature_id, forecasts.reference_time, forecasts.nwm_vers; \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/analysis_assim/ana_high_flow_magnitude.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/analysis_assim/ana_high_flow_magnitude.sql index 7668910c..70221432 100644 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/analysis_assim/ana_high_flow_magnitude.sql +++ 
b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/analysis_assim/ana_high_flow_magnitude.sql @@ -1,16 +1,16 @@ DROP TABLE IF EXISTS publish.ana_high_flow_magnitude; WITH high_flow_mag AS (SELECT maxflows.feature_id, - maxflows.maxflow_1hour_cfs AS max_flow, + maxflows.discharge_cfs AS max_flow, maxflows.reference_time, maxflows.nwm_vers, CASE - WHEN maxflows.maxflow_1hour_cfs >= thresholds.rf_50_0_17c THEN '2'::text - WHEN maxflows.maxflow_1hour_cfs >= thresholds.rf_25_0_17c THEN '4'::text - WHEN maxflows.maxflow_1hour_cfs >= thresholds.rf_10_0_17c THEN '10'::text - WHEN maxflows.maxflow_1hour_cfs >= thresholds.rf_5_0_17c THEN '20'::text - WHEN maxflows.maxflow_1hour_cfs >= thresholds.rf_2_0_17c THEN '50'::text - WHEN maxflows.maxflow_1hour_cfs >= thresholds.high_water_threshold THEN '>50'::text + WHEN maxflows.discharge_cfs >= thresholds.rf_50_0_17c THEN '2'::text + WHEN maxflows.discharge_cfs >= thresholds.rf_25_0_17c THEN '4'::text + WHEN maxflows.discharge_cfs >= thresholds.rf_10_0_17c THEN '10'::text + WHEN maxflows.discharge_cfs >= thresholds.rf_5_0_17c THEN '20'::text + WHEN maxflows.discharge_cfs >= thresholds.rf_2_0_17c THEN '50'::text + WHEN maxflows.discharge_cfs >= thresholds.high_water_threshold THEN '>50'::text ELSE NULL::text END AS recur_cat, thresholds.high_water_threshold AS high_water_threshold, @@ -22,7 +22,7 @@ WITH high_flow_mag AS FROM cache.max_flows_ana maxflows JOIN derived.recurrence_flows_CONUS thresholds ON maxflows.feature_id = thresholds.feature_id WHERE (thresholds.high_water_threshold > 0::double precision) - AND maxflows.maxflow_1hour_cfs >= thresholds.high_water_threshold) + AND maxflows.discharge_cfs >= thresholds.high_water_threshold) SELECT channels.feature_id, channels.feature_id::TEXT AS feature_id_str, channels.strm_order, diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/analysis_assim/ana_past_14day_max_high_flow_magnitude.sql 
b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/analysis_assim/ana_past_14day_max_high_flow_magnitude.sql index 2620f105..f39028ff 100644 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/analysis_assim/ana_past_14day_max_high_flow_magnitude.sql +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/analysis_assim/ana_past_14day_max_high_flow_magnitude.sql @@ -9,14 +9,14 @@ SELECT channels.feature_id, hfm_14day.nwm_vers, hfm_14day.reference_time, hfm_14day.reference_time AS valid_time, - hfm_14day.max_flow_14day_cfs AS max_flow_14day_cfs, + hfm_14day.discharge_cfs AS max_flow_14day_cfs, CASE - WHEN max_flow_14day_cfs >= thresholds.rf_50_0_17C THEN '2' - WHEN max_flow_14day_cfs >= thresholds.rf_25_0_17C THEN '4' - WHEN max_flow_14day_cfs >= thresholds.rf_10_0_17C THEN '10' - WHEN max_flow_14day_cfs >= thresholds.rf_5_0_17C THEN '20' - WHEN max_flow_14day_cfs >= thresholds.rf_2_0_17C THEN '50' - WHEN max_flow_14day_cfs >= thresholds.high_water_threshold THEN '>50' + WHEN discharge_cfs >= thresholds.rf_50_0_17C THEN '2' + WHEN discharge_cfs >= thresholds.rf_25_0_17C THEN '4' + WHEN discharge_cfs >= thresholds.rf_10_0_17C THEN '10' + WHEN discharge_cfs >= thresholds.rf_5_0_17C THEN '20' + WHEN discharge_cfs >= thresholds.rf_2_0_17C THEN '50' + WHEN discharge_cfs >= thresholds.high_water_threshold THEN '>50' ELSE NULL END AS recur_cat_14day, thresholds.high_water_threshold AS high_water_threshold, @@ -31,4 +31,4 @@ INTO publish.ana_past_14day_max_high_flow_magnitude FROM derived.channels_CONUS channels JOIN derived.recurrence_flows_CONUS thresholds ON (channels.feature_id = thresholds.feature_id) JOIN cache.max_flows_ana_14day hfm_14day ON (channels.feature_id = hfm_14day.feature_id) -WHERE (thresholds.high_water_threshold > 0) AND hfm_14day.max_flow_14day_cfs >= thresholds.high_water_threshold \ No newline at end of file +WHERE (thresholds.high_water_threshold > 0) AND hfm_14day.discharge_cfs >= thresholds.high_water_threshold \ No 
newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/analysis_assim/ana_past_7day_max_high_flow_magnitude.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/analysis_assim/ana_past_7day_max_high_flow_magnitude.sql index 83e024ac..6dfe0f9b 100644 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/analysis_assim/ana_past_7day_max_high_flow_magnitude.sql +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/analysis_assim/ana_past_7day_max_high_flow_magnitude.sql @@ -9,14 +9,14 @@ SELECT channels.feature_id, hfm_7day.nwm_vers, hfm_7day.reference_time, hfm_7day.reference_time AS valid_time, - hfm_7day.max_flow_7day_cfs AS max_flow_7day_cfs, + hfm_7day.discharge_cfs AS max_flow_7day_cfs, CASE - WHEN max_flow_7day_cfs >= thresholds.rf_50_0_17C THEN '2' - WHEN max_flow_7day_cfs >= thresholds.rf_25_0_17C THEN '4' - WHEN max_flow_7day_cfs >= thresholds.rf_10_0_17C THEN '10' - WHEN max_flow_7day_cfs >= thresholds.rf_5_0_17C THEN '20' - WHEN max_flow_7day_cfs >= thresholds.rf_2_0_17C THEN '50' - WHEN max_flow_7day_cfs >= thresholds.high_water_threshold THEN '>50' + WHEN discharge_cfs >= thresholds.rf_50_0_17C THEN '2' + WHEN discharge_cfs >= thresholds.rf_25_0_17C THEN '4' + WHEN discharge_cfs >= thresholds.rf_10_0_17C THEN '10' + WHEN discharge_cfs >= thresholds.rf_5_0_17C THEN '20' + WHEN discharge_cfs >= thresholds.rf_2_0_17C THEN '50' + WHEN discharge_cfs >= thresholds.high_water_threshold THEN '>50' ELSE NULL END AS recur_cat_7day, thresholds.high_water_threshold AS high_water_threshold, @@ -31,4 +31,4 @@ INTO publish.ana_past_7day_max_high_flow_magnitude FROM derived.channels_CONUS channels JOIN derived.recurrence_flows_CONUS thresholds ON (channels.feature_id = thresholds.feature_id) JOIN cache.max_flows_ana_7day hfm_7day ON (channels.feature_id = hfm_7day.feature_id) -WHERE (thresholds.high_water_threshold > 0) AND hfm_7day.max_flow_7day_cfs >= thresholds.high_water_threshold \ No newline at 
end of file +WHERE (thresholds.high_water_threshold > 0) AND hfm_7day.discharge_cfs >= thresholds.high_water_threshold \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/analysis_assim/ana_streamflow.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/analysis_assim/ana_streamflow.sql index b6c66b2d..dbceecb1 100644 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/analysis_assim/ana_streamflow.sql +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/analysis_assim/ana_streamflow.sql @@ -3,7 +3,7 @@ DROP TABLE IF EXISTS publish.ana_streamflow; SELECT ana.feature_id, ana.feature_id::text as feature_id_str, - ana.maxflow_1hour_cfs as streamflow, + ana.discharge_cfs as streamflow, ana.nwm_vers, ana.reference_time, ana.reference_time AS valid_time, diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/analysis_assim_alaska/ana_streamflow_ak.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/analysis_assim_alaska/ana_streamflow_ak.sql index 111790ed..9f77ae6d 100644 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/analysis_assim_alaska/ana_streamflow_ak.sql +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/analysis_assim_alaska/ana_streamflow_ak.sql @@ -4,7 +4,7 @@ SELECT ana.feature_id, channels.feature_id as channels_feature_id, ana.feature_id::text as feature_id_str, - ana.maxflow_1hour_cfs as streamflow, + ana.discharge_cfs as streamflow, ana.nwm_vers, ana.reference_time, ana.reference_time AS valid_time, diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/analysis_assim_hawaii/ana_high_flow_magnitude_hi.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/analysis_assim_hawaii/ana_high_flow_magnitude_hi.sql index 134c6a5e..cc0d45b3 100644 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/analysis_assim_hawaii/ana_high_flow_magnitude_hi.sql +++ 
b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/analysis_assim_hawaii/ana_high_flow_magnitude_hi.sql @@ -2,17 +2,17 @@ DROP TABLE IF EXISTS publish.ana_high_flow_magnitude_hi; WITH high_flow_mag AS (SELECT maxflows.feature_id, - maxflows.maxflow_1hour_cfs AS max_flow, + maxflows.discharge_cfs AS max_flow, maxflows.reference_time, maxflows.nwm_vers, CASE WHEN thresholds.high_water_threshold = '-9999'::integer::double precision THEN 'Not Available'::text - WHEN maxflows.maxflow_1hour_cfs >= thresholds.rf_100_0 THEN '1'::text - WHEN maxflows.maxflow_1hour_cfs >= thresholds.rf_50_0 THEN '2'::text - WHEN maxflows.maxflow_1hour_cfs >= thresholds.rf_25_0 THEN '4'::text - WHEN maxflows.maxflow_1hour_cfs >= thresholds.rf_10_0 THEN '10'::text - WHEN maxflows.maxflow_1hour_cfs >= thresholds.rf_5_0 THEN '20'::text - WHEN maxflows.maxflow_1hour_cfs >= thresholds.high_water_threshold THEN '>20'::text + WHEN maxflows.discharge_cfs >= thresholds.rf_100_0 THEN '1'::text + WHEN maxflows.discharge_cfs >= thresholds.rf_50_0 THEN '2'::text + WHEN maxflows.discharge_cfs >= thresholds.rf_25_0 THEN '4'::text + WHEN maxflows.discharge_cfs >= thresholds.rf_10_0 THEN '10'::text + WHEN maxflows.discharge_cfs >= thresholds.rf_5_0 THEN '20'::text + WHEN maxflows.discharge_cfs >= thresholds.high_water_threshold THEN '>20'::text ELSE NULL::text END AS recur_cat, thresholds.high_water_threshold AS high_water_threshold, @@ -26,7 +26,7 @@ WITH high_flow_mag AS JOIN derived.recurrence_flows_hi thresholds ON maxflows.feature_id = thresholds.feature_id WHERE (thresholds.high_water_threshold > 0::double precision OR thresholds.high_water_threshold = '-9999'::integer::double precision) - AND maxflows.maxflow_1hour_cfs >= thresholds.high_water_threshold ) + AND maxflows.discharge_cfs >= thresholds.high_water_threshold ) SELECT channels.feature_id, channels.feature_id::TEXT AS feature_id_str, diff --git 
a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/analysis_assim_hawaii/ana_streamflow_hi.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/analysis_assim_hawaii/ana_streamflow_hi.sql index fe4c5a24..4546c980 100644 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/analysis_assim_hawaii/ana_streamflow_hi.sql +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/analysis_assim_hawaii/ana_streamflow_hi.sql @@ -3,7 +3,7 @@ DROP TABLE IF EXISTS publish.ana_streamflow_hi; SELECT ana.feature_id, ana.feature_id::text as feature_id_str, - ana.maxflow_1hour_cfs as streamflow, + ana.discharge_cfs as streamflow, ana.nwm_vers, ana.reference_time, ana.reference_time AS valid_time, diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/analysis_assim_puertorico/ana_high_flow_magnitude_prvi.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/analysis_assim_puertorico/ana_high_flow_magnitude_prvi.sql index d64349ef..c8ec70ce 100644 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/analysis_assim_puertorico/ana_high_flow_magnitude_prvi.sql +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/analysis_assim_puertorico/ana_high_flow_magnitude_prvi.sql @@ -2,17 +2,17 @@ DROP TABLE IF EXISTS publish.ana_high_flow_magnitude_prvi; WITH high_flow_mag AS (SELECT maxflows.feature_id, - maxflows.maxflow_1hour_cfs AS max_flow, + maxflows.discharge_cfs AS max_flow, maxflows.reference_time, maxflows.nwm_vers, CASE WHEN thresholds.high_water_threshold = '-9999'::integer::double precision THEN 'Not Available'::text - WHEN maxflows.maxflow_1hour_cfs >= thresholds.rf_100_0 THEN '1'::text - WHEN maxflows.maxflow_1hour_cfs >= thresholds.rf_50_0 THEN '2'::text - WHEN maxflows.maxflow_1hour_cfs >= thresholds.rf_25_0 THEN '4'::text - WHEN maxflows.maxflow_1hour_cfs >= thresholds.rf_10_0 THEN '10'::text - WHEN maxflows.maxflow_1hour_cfs >= thresholds.rf_5_0 THEN '20'::text - WHEN 
maxflows.maxflow_1hour_cfs >= thresholds.high_water_threshold THEN '>20'::text + WHEN maxflows.discharge_cfs >= thresholds.rf_100_0 THEN '1'::text + WHEN maxflows.discharge_cfs >= thresholds.rf_50_0 THEN '2'::text + WHEN maxflows.discharge_cfs >= thresholds.rf_25_0 THEN '4'::text + WHEN maxflows.discharge_cfs >= thresholds.rf_10_0 THEN '10'::text + WHEN maxflows.discharge_cfs >= thresholds.rf_5_0 THEN '20'::text + WHEN maxflows.discharge_cfs >= thresholds.high_water_threshold THEN '>20'::text ELSE NULL::text END AS recur_cat, thresholds.high_water_threshold AS high_water_threshold, @@ -26,7 +26,7 @@ WITH high_flow_mag AS JOIN derived.recurrence_flows_prvi thresholds ON maxflows.feature_id = thresholds.feature_id WHERE (thresholds.high_water_threshold > 0::double precision OR thresholds.high_water_threshold = '-9999'::integer::double precision) - AND maxflows.maxflow_1hour_cfs >= thresholds.high_water_threshold ) + AND maxflows.discharge_cfs >= thresholds.high_water_threshold ) SELECT channels.feature_id, channels.feature_id::TEXT AS feature_id_str, diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/analysis_assim_puertorico/ana_streamflow_prvi.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/analysis_assim_puertorico/ana_streamflow_prvi.sql index b32c933b..55fb3983 100644 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/analysis_assim_puertorico/ana_streamflow_prvi.sql +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/analysis_assim_puertorico/ana_streamflow_prvi.sql @@ -3,7 +3,7 @@ DROP TABLE IF EXISTS publish.ana_streamflow_prvi; SELECT ana.feature_id, ana.feature_id::text as feature_id_str, - ana.maxflow_1hour_cfs as streamflow, + ana.discharge_cfs as streamflow, ana.nwm_vers, ana.reference_time, ana.reference_time AS valid_time, diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/medium_range_alaska_mem1/mrf_gfs_10day_peak_flow_arrival_time_alaska.sql 
b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/medium_range_alaska_mem1/mrf_gfs_10day_peak_flow_arrival_time_alaska.sql index 4d3a19b9..deafebef 100644 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/medium_range_alaska_mem1/mrf_gfs_10day_peak_flow_arrival_time_alaska.sql +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/medium_range_alaska_mem1/mrf_gfs_10day_peak_flow_arrival_time_alaska.sql @@ -15,7 +15,7 @@ SELECT to_char(forecasts.reference_time::timestamp without time zone + INTERVAL '1 hour' * min(forecasts.forecast_hour), 'YYYY-MM-DD HH24:MI:SS UTC') AS peak_flow_arrival_time, forecasts.nwm_vers, forecasts.reference_time, - max_flows.maxflow_10day_cfs AS max_flow_cfs, + max_flows.discharge_cfs AS max_flow_cfs, arrival_time.below_bank_return_hour, to_char(forecasts.reference_time::timestamp without time zone + INTERVAL '1 hour' * arrival_time.below_bank_return_hour, 'YYYY-MM-DD HH24:MI:SS UTC') AS below_bank_return_time, to_char(now()::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS update_time, @@ -30,8 +30,8 @@ INTO publish.mrf_gfs_10day_peak_flow_arrival_time_alaska FROM ingest.nwm_channel_rt_mrf_gfs_ak_mem1 AS forecasts -- Join in max flows on max streamflow to only get peak flows -JOIN cache.mrf_gfs_max_flows_ak AS max_flows - ON forecasts.feature_id = max_flows.feature_id AND round((forecasts.streamflow*35.315)::numeric, 2) = max_flows.maxflow_10day_cfs +JOIN cache.max_flows_mrf_gfs_10day_ak AS max_flows + ON forecasts.feature_id = max_flows.feature_id AND round((forecasts.streamflow*35.315)::numeric, 2) = max_flows.discharge_cfs -- Join in channels data to get reach metadata and geometry JOIN derived.channels_alaska as channels ON forecasts.feature_id = channels.feature_id::bigint @@ -39,4 +39,4 @@ JOIN derived.channels_alaska as channels ON forecasts.feature_id = channels.feat -- Join in arrival_time JOIN arrival_time ON forecasts.feature_id = arrival_time.feature_id -GROUP BY 
forecasts.feature_id, forecasts.reference_time, forecasts.nwm_vers, forecasts.streamflow, max_flows.maxflow_10day_cfs, arrival_time.below_bank_return_hour, channels.geom, channels.strm_order, channels.name, channels.huc6; \ No newline at end of file +GROUP BY forecasts.feature_id, forecasts.reference_time, forecasts.nwm_vers, forecasts.streamflow, max_flows.discharge_cfs, arrival_time.below_bank_return_hour, channels.geom, channels.strm_order, channels.name, channels.huc6; \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/medium_range_blend/mrf_nbm_10day_max_high_flow_magnitude.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/medium_range_blend/mrf_nbm_10day_max_high_flow_magnitude.sql index 67724899..a1b667c5 100644 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/medium_range_blend/mrf_nbm_10day_max_high_flow_magnitude.sql +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/medium_range_blend/mrf_nbm_10day_max_high_flow_magnitude.sql @@ -1,36 +1,36 @@ DROP TABLE IF EXISTS publish.mrf_nbm_10day_max_high_flow_magnitude; WITH high_flow_mag AS ( - SELECT maxflows.feature_id, - maxflows.maxflow_3day_cfs, - maxflows.maxflow_5day_cfs, - maxflows.maxflow_10day_cfs, - maxflows.nwm_vers, - maxflows.reference_time, + SELECT maxflows_10day.feature_id, + maxflows_3day.discharge_cfs AS maxflow_3day_cfs, + maxflows_5day.discharge_cfs AS maxflow_5day_cfs, + maxflows_10day.discharge_cfs AS maxflow_10day_cfs, + maxflows_10day.nwm_vers, + maxflows_10day.reference_time, CASE - WHEN maxflows.maxflow_3day_cfs >= thresholds.rf_50_0_17c THEN '2'::text - WHEN maxflows.maxflow_3day_cfs >= thresholds.rf_25_0_17c THEN '4'::text - WHEN maxflows.maxflow_3day_cfs >= thresholds.rf_10_0_17c THEN '10'::text - WHEN maxflows.maxflow_3day_cfs >= thresholds.rf_5_0_17c THEN '20'::text - WHEN maxflows.maxflow_3day_cfs >= thresholds.rf_2_0_17c THEN '50'::text - WHEN maxflows.maxflow_3day_cfs >= 
thresholds.high_water_threshold THEN '>50'::text + WHEN maxflows_3day.discharge_cfs >= thresholds.rf_50_0_17c THEN '2'::text + WHEN maxflows_3day.discharge_cfs >= thresholds.rf_25_0_17c THEN '4'::text + WHEN maxflows_3day.discharge_cfs >= thresholds.rf_10_0_17c THEN '10'::text + WHEN maxflows_3day.discharge_cfs >= thresholds.rf_5_0_17c THEN '20'::text + WHEN maxflows_3day.discharge_cfs >= thresholds.rf_2_0_17c THEN '50'::text + WHEN maxflows_3day.discharge_cfs >= thresholds.high_water_threshold THEN '>50'::text ELSE NULL::text END AS recur_cat_3day, CASE - WHEN maxflows.maxflow_5day_cfs >= thresholds.rf_50_0_17c THEN '2'::text - WHEN maxflows.maxflow_5day_cfs >= thresholds.rf_25_0_17c THEN '4'::text - WHEN maxflows.maxflow_5day_cfs >= thresholds.rf_10_0_17c THEN '10'::text - WHEN maxflows.maxflow_5day_cfs >= thresholds.rf_5_0_17c THEN '20'::text - WHEN maxflows.maxflow_5day_cfs >= thresholds.rf_2_0_17c THEN '50'::text - WHEN maxflows.maxflow_5day_cfs >= thresholds.high_water_threshold THEN '>50'::text + WHEN maxflows_5day.discharge_cfs >= thresholds.rf_50_0_17c THEN '2'::text + WHEN maxflows_5day.discharge_cfs >= thresholds.rf_25_0_17c THEN '4'::text + WHEN maxflows_5day.discharge_cfs >= thresholds.rf_10_0_17c THEN '10'::text + WHEN maxflows_5day.discharge_cfs >= thresholds.rf_5_0_17c THEN '20'::text + WHEN maxflows_5day.discharge_cfs >= thresholds.rf_2_0_17c THEN '50'::text + WHEN maxflows_5day.discharge_cfs >= thresholds.high_water_threshold THEN '>50'::text ELSE NULL::text END AS recur_cat_5day, CASE - WHEN maxflows.maxflow_10day_cfs >= thresholds.rf_50_0_17c THEN '2'::text - WHEN maxflows.maxflow_10day_cfs >= thresholds.rf_25_0_17c THEN '4'::text - WHEN maxflows.maxflow_10day_cfs >= thresholds.rf_10_0_17c THEN '10'::text - WHEN maxflows.maxflow_10day_cfs >= thresholds.rf_5_0_17c THEN '20'::text - WHEN maxflows.maxflow_10day_cfs >= thresholds.rf_2_0_17c THEN '50'::text - WHEN maxflows.maxflow_10day_cfs >= thresholds.high_water_threshold THEN '>50'::text + WHEN 
maxflows_10day.discharge_cfs >= thresholds.rf_50_0_17c THEN '2'::text + WHEN maxflows_10day.discharge_cfs >= thresholds.rf_25_0_17c THEN '4'::text + WHEN maxflows_10day.discharge_cfs >= thresholds.rf_10_0_17c THEN '10'::text + WHEN maxflows_10day.discharge_cfs >= thresholds.rf_5_0_17c THEN '20'::text + WHEN maxflows_10day.discharge_cfs >= thresholds.rf_2_0_17c THEN '50'::text + WHEN maxflows_10day.discharge_cfs >= thresholds.high_water_threshold THEN '>50'::text ELSE NULL::text END AS recur_cat_10day, thresholds.high_water_threshold AS high_water_threshold, @@ -39,9 +39,11 @@ WITH high_flow_mag AS ( thresholds.rf_10_0_17c AS flow_10yr, thresholds.rf_25_0_17c AS flow_25yr, thresholds.rf_50_0_17c AS flow_50yr - FROM cache.max_flows_mrf_nbm maxflows - JOIN derived.recurrence_flows_conus thresholds ON maxflows.feature_id = thresholds.feature_id - WHERE (thresholds.high_water_threshold > 0::double precision) AND maxflows.maxflow_10day_cfs >= thresholds.high_water_threshold + FROM cache.max_flows_mrf_nbm_10day AS maxflows_10day + JOIN cache.max_flows_mrf_nbm_5day AS maxflows_5day ON maxflows_10day.feature_id = maxflows_5day.feature_id + JOIN cache.max_flows_mrf_nbm_3day AS maxflows_3day ON maxflows_10day.feature_id = maxflows_3day.feature_id + JOIN derived.recurrence_flows_conus thresholds ON maxflows_10day.feature_id = thresholds.feature_id + WHERE (thresholds.high_water_threshold > 0::double precision) AND maxflows_10day.discharge_cfs >= thresholds.high_water_threshold ) SELECT channels.feature_id, diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/medium_range_blend/mrf_nbm_10day_peak_flow_arrival_time.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/medium_range_blend/mrf_nbm_10day_peak_flow_arrival_time.sql index f6145bac..b43b011a 100644 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/medium_range_blend/mrf_nbm_10day_peak_flow_arrival_time.sql +++ 
b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/medium_range_blend/mrf_nbm_10day_peak_flow_arrival_time.sql @@ -11,7 +11,7 @@ SELECT channels.state, forecasts.nwm_vers, forecasts.reference_time, - max_flows.maxflow_10day_cfs AS max_flow_cfs, + max_flows.discharge_cfs AS max_flow_cfs, rf.high_water_threshold, arrival_time.below_bank_return_hour, arrival_time.below_bank_return_time, @@ -22,8 +22,8 @@ INTO publish.mrf_nbm_10day_peak_flow_arrival_time FROM ingest.nwm_channel_rt_mrf_nbm AS forecasts -- Join in max flows on max streamflow to only get peak flows -JOIN cache.max_flows_mrf_nbm AS max_flows - ON forecasts.feature_id = max_flows.feature_id AND round((forecasts.streamflow*35.315)::numeric, 2) = max_flows.maxflow_10day_cfs +JOIN cache.max_flows_mrf_nbm_10day AS max_flows + ON forecasts.feature_id = max_flows.feature_id AND round((forecasts.streamflow*35.315)::numeric, 2) = max_flows.discharge_cfs -- Join in channels data to get reach metadata and geometry JOIN derived.channels_conus AS channels ON forecasts.feature_id = channels.feature_id @@ -35,4 +35,4 @@ JOIN derived.recurrence_flows_conus AS rf ON forecasts.feature_id = rf.feature_i JOIN publish.mrf_nbm_10day_high_water_arrival_time AS arrival_time ON forecasts.feature_id = arrival_time.feature_id and forecasts.reference_time = arrival_time.reference_time WHERE round((forecasts.streamflow*35.315)::numeric, 2) >= rf.high_water_threshold -GROUP BY forecasts.feature_id, forecasts.reference_time, forecasts.nwm_vers, forecasts.streamflow, channels.name, channels.strm_order, channels.huc6, channels.state, rf.high_water_threshold, max_flows.maxflow_10day_cfs, arrival_time.below_bank_return_hour, arrival_time.below_bank_return_time, channels.geom; \ No newline at end of file +GROUP BY forecasts.feature_id, forecasts.reference_time, forecasts.nwm_vers, forecasts.streamflow, channels.name, channels.strm_order, channels.huc6, channels.state, rf.high_water_threshold, max_flows.discharge_cfs, 
arrival_time.below_bank_return_hour, arrival_time.below_bank_return_time, channels.geom; \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/medium_range_blend/mrf_nbm_10day_rapid_onset_flooding.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/medium_range_blend/mrf_nbm_10day_rapid_onset_flooding.sql index 7d8b700c..d6a05b66 100644 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/medium_range_blend/mrf_nbm_10day_rapid_onset_flooding.sql +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/medium_range_blend/mrf_nbm_10day_rapid_onset_flooding.sql @@ -10,7 +10,7 @@ WITH rapid_onset AS ( ( WITH series AS -- Calculate a full 240 hour series for every feature_id, so that unadjacent hours aren't compared (SELECT channels.feature_id, generate_series(3,240,3) AS forecast_hour - FROM derived.channels_conus channels JOIN cache.max_flows_mrf_nbm as mf on channels.feature_id = mf.feature_id + FROM derived.channels_conus channels JOIN cache.max_flows_mrf_nbm_10day as mf on channels.feature_id = mf.feature_id WHERE channels.strm_order <= 4 ) SELECT series.feature_id, series.forecast_hour, CASE WHEN streamflow is NOT NULL THEN (streamflow * 35.315) ELSE 0.001 END AS streamflow -- Set streamflow to 0.01 in cases where it is missing, so we don't get a divide by zero error diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/medium_range_blend_alaska/mrf_nbm_10day_peak_flow_arrival_time_alaska.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/medium_range_blend_alaska/mrf_nbm_10day_peak_flow_arrival_time_alaska.sql index b741225c..e9c23c9e 100644 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/medium_range_blend_alaska/mrf_nbm_10day_peak_flow_arrival_time_alaska.sql +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/medium_range_blend_alaska/mrf_nbm_10day_peak_flow_arrival_time_alaska.sql @@ -15,7 +15,7 @@ SELECT 
to_char(forecasts.reference_time::timestamp without time zone + INTERVAL '1 hour' * min(forecasts.forecast_hour), 'YYYY-MM-DD HH24:MI:SS UTC') AS peak_flow_arrival_time, forecasts.nwm_vers, forecasts.reference_time, - max_flows.maxflow_10day_cfs AS max_flow_cfs, + max_flows.discharge_cfs AS max_flow_cfs, arrival_time.below_bank_return_hour, to_char(forecasts.reference_time::timestamp without time zone + INTERVAL '1 hour' * arrival_time.below_bank_return_hour, 'YYYY-MM-DD HH24:MI:SS UTC') AS below_bank_return_time, to_char(now()::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS update_time, @@ -30,8 +30,8 @@ INTO publish.mrf_nbm_10day_peak_flow_arrival_time_alaska FROM ingest.nwm_channel_rt_mrf_nbm_ak AS forecasts -- Join in max flows on max streamflow to only get peak flows -JOIN cache.mrf_nbm_max_flows_ak AS max_flows - ON forecasts.feature_id = max_flows.feature_id AND round((forecasts.streamflow*35.315)::numeric, 2) = max_flows.maxflow_10day_cfs +JOIN cache.mrf_nbm_max_flows_ak_10day AS max_flows + ON forecasts.feature_id = max_flows.feature_id AND round((forecasts.streamflow*35.315)::numeric, 2) = max_flows.discharge_cfs -- Join in channels data to get reach metadata and geometry JOIN derived.channels_alaska as channels ON forecasts.feature_id = channels.feature_id::bigint @@ -39,4 +39,4 @@ JOIN derived.channels_alaska as channels ON forecasts.feature_id = channels.feat -- Join in arrival_time JOIN arrival_time ON forecasts.feature_id = arrival_time.feature_id -GROUP BY forecasts.feature_id, forecasts.reference_time, forecasts.nwm_vers, forecasts.streamflow, max_flows.maxflow_10day_cfs, arrival_time.below_bank_return_hour, channels.geom, channels.strm_order, channels.name, channels.huc6; \ No newline at end of file +GROUP BY forecasts.feature_id, forecasts.reference_time, forecasts.nwm_vers, forecasts.streamflow, max_flows.discharge_cfs, arrival_time.below_bank_return_hour, channels.geom, channels.strm_order, channels.name, channels.huc6; \ No 
newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/medium_range_mem1/mrf_gfs_10day_max_high_flow_magnitude.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/medium_range_mem1/mrf_gfs_10day_max_high_flow_magnitude.sql index 502e982e..409fdf4f 100644 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/medium_range_mem1/mrf_gfs_10day_max_high_flow_magnitude.sql +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/medium_range_mem1/mrf_gfs_10day_max_high_flow_magnitude.sql @@ -1,36 +1,36 @@ DROP TABLE IF EXISTS publish.mrf_gfs_10day_max_high_flow_magnitude; WITH high_flow_mag AS ( - SELECT maxflows.feature_id, - maxflows.maxflow_3day_cfs, - maxflows.maxflow_5day_cfs, - maxflows.maxflow_10day_cfs, - maxflows.nwm_vers, - maxflows.reference_time, + SELECT maxflows_10day.feature_id, + maxflows_3day.discharge_cfs AS maxflow_3day_cfs, + maxflows_5day.discharge_cfs AS maxflow_5day_cfs, + maxflows_10day.discharge_cfs AS maxflow_10day_cfs, + maxflows_10day.nwm_vers, + maxflows_10day.reference_time, CASE - WHEN maxflows.maxflow_3day_cfs >= thresholds.rf_50_0_17c THEN '2'::text - WHEN maxflows.maxflow_3day_cfs >= thresholds.rf_25_0_17c THEN '4'::text - WHEN maxflows.maxflow_3day_cfs >= thresholds.rf_10_0_17c THEN '10'::text - WHEN maxflows.maxflow_3day_cfs >= thresholds.rf_5_0_17c THEN '20'::text - WHEN maxflows.maxflow_3day_cfs >= thresholds.rf_2_0_17c THEN '50'::text - WHEN maxflows.maxflow_3day_cfs >= thresholds.high_water_threshold THEN '>50'::text + WHEN maxflows_3day.discharge_cfs >= thresholds.rf_50_0_17c THEN '2'::text + WHEN maxflows_3day.discharge_cfs >= thresholds.rf_25_0_17c THEN '4'::text + WHEN maxflows_3day.discharge_cfs >= thresholds.rf_10_0_17c THEN '10'::text + WHEN maxflows_3day.discharge_cfs >= thresholds.rf_5_0_17c THEN '20'::text + WHEN maxflows_3day.discharge_cfs >= thresholds.rf_2_0_17c THEN '50'::text + WHEN maxflows_3day.discharge_cfs >= thresholds.high_water_threshold THEN 
'>50'::text ELSE NULL::text END AS recur_cat_3day, CASE - WHEN maxflows.maxflow_5day_cfs >= thresholds.rf_50_0_17c THEN '2'::text - WHEN maxflows.maxflow_5day_cfs >= thresholds.rf_25_0_17c THEN '4'::text - WHEN maxflows.maxflow_5day_cfs >= thresholds.rf_10_0_17c THEN '10'::text - WHEN maxflows.maxflow_5day_cfs >= thresholds.rf_5_0_17c THEN '20'::text - WHEN maxflows.maxflow_5day_cfs >= thresholds.rf_2_0_17c THEN '50'::text - WHEN maxflows.maxflow_5day_cfs >= thresholds.high_water_threshold THEN '>50'::text + WHEN maxflows_5day.discharge_cfs >= thresholds.rf_50_0_17c THEN '2'::text + WHEN maxflows_5day.discharge_cfs >= thresholds.rf_25_0_17c THEN '4'::text + WHEN maxflows_5day.discharge_cfs >= thresholds.rf_10_0_17c THEN '10'::text + WHEN maxflows_5day.discharge_cfs >= thresholds.rf_5_0_17c THEN '20'::text + WHEN maxflows_5day.discharge_cfs >= thresholds.rf_2_0_17c THEN '50'::text + WHEN maxflows_5day.discharge_cfs >= thresholds.high_water_threshold THEN '>50'::text ELSE NULL::text END AS recur_cat_5day, CASE - WHEN maxflows.maxflow_10day_cfs >= thresholds.rf_50_0_17c THEN '2'::text - WHEN maxflows.maxflow_10day_cfs >= thresholds.rf_25_0_17c THEN '4'::text - WHEN maxflows.maxflow_10day_cfs >= thresholds.rf_10_0_17c THEN '10'::text - WHEN maxflows.maxflow_10day_cfs >= thresholds.rf_5_0_17c THEN '20'::text - WHEN maxflows.maxflow_10day_cfs >= thresholds.rf_2_0_17c THEN '50'::text - WHEN maxflows.maxflow_10day_cfs >= thresholds.high_water_threshold THEN '>50'::text + WHEN maxflows_10day.discharge_cfs >= thresholds.rf_50_0_17c THEN '2'::text + WHEN maxflows_10day.discharge_cfs >= thresholds.rf_25_0_17c THEN '4'::text + WHEN maxflows_10day.discharge_cfs >= thresholds.rf_10_0_17c THEN '10'::text + WHEN maxflows_10day.discharge_cfs >= thresholds.rf_5_0_17c THEN '20'::text + WHEN maxflows_10day.discharge_cfs >= thresholds.rf_2_0_17c THEN '50'::text + WHEN maxflows_10day.discharge_cfs >= thresholds.high_water_threshold THEN '>50'::text ELSE NULL::text END AS recur_cat_10day, 
thresholds.high_water_threshold AS high_water_threshold, @@ -39,9 +39,11 @@ WITH high_flow_mag AS ( thresholds.rf_10_0_17c AS flow_10yr, thresholds.rf_25_0_17c AS flow_25yr, thresholds.rf_50_0_17c AS flow_50yr - FROM cache.max_flows_mrf_gfs maxflows - JOIN derived.recurrence_flows_conus thresholds ON maxflows.feature_id = thresholds.feature_id - WHERE (thresholds.high_water_threshold > 0::double precision) AND maxflows.maxflow_10day_cfs >= thresholds.high_water_threshold + FROM cache.max_flows_mrf_gfs_10day AS maxflows_10day + JOIN cache.max_flows_mrf_gfs_5day AS maxflows_5day ON maxflows_10day.feature_id = maxflows_5day.feature_id + JOIN cache.max_flows_mrf_gfs_3day AS maxflows_3day ON maxflows_10day.feature_id = maxflows_3day.feature_id + JOIN derived.recurrence_flows_conus thresholds ON maxflows_10day.feature_id = thresholds.feature_id + WHERE (thresholds.high_water_threshold > 0::double precision) AND maxflows_10day.discharge_cfs >= thresholds.high_water_threshold ) SELECT channels.feature_id, diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/medium_range_mem1/mrf_gfs_10day_peak_flow_arrival_time.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/medium_range_mem1/mrf_gfs_10day_peak_flow_arrival_time.sql index f7b93ec1..0b8be516 100644 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/medium_range_mem1/mrf_gfs_10day_peak_flow_arrival_time.sql +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/medium_range_mem1/mrf_gfs_10day_peak_flow_arrival_time.sql @@ -11,7 +11,7 @@ SELECT channels.state, forecasts.nwm_vers, forecasts.reference_time, - max_flows.maxflow_10day_cfs AS max_flow_cfs, + max_flows.discharge_cfs AS max_flow_cfs, rf.high_water_threshold, arrival_time.below_bank_return_hour, arrival_time.below_bank_return_time, @@ -22,8 +22,8 @@ INTO publish.mrf_gfs_10day_peak_flow_arrival_time FROM ingest.nwm_channel_rt_mrf_gfs_mem1 AS forecasts -- Join in max flows on max streamflow to only get peak flows 
-JOIN cache.max_flows_mrf_gfs AS max_flows - ON forecasts.feature_id = max_flows.feature_id AND round((forecasts.streamflow*35.315)::numeric, 2) = max_flows.maxflow_10day_cfs +JOIN cache.max_flows_mrf_gfs_10day AS max_flows + ON forecasts.feature_id = max_flows.feature_id AND round((forecasts.streamflow*35.315)::numeric, 2) = max_flows.discharge_cfs -- Join in channels data to get reach metadata and geometry JOIN derived.channels_conus AS channels ON forecasts.feature_id = channels.feature_id @@ -35,4 +35,4 @@ JOIN derived.recurrence_flows_conus AS rf ON forecasts.feature_id = rf.feature_i JOIN publish.mrf_gfs_10day_high_water_arrival_time AS arrival_time ON forecasts.feature_id = arrival_time.feature_id and forecasts.reference_time = arrival_time.reference_time WHERE round((forecasts.streamflow*35.315)::numeric, 2) >= rf.high_water_threshold -GROUP BY forecasts.feature_id, forecasts.reference_time, forecasts.nwm_vers, forecasts.streamflow, channels.name, channels.strm_order, channels.huc6, channels.state, rf.high_water_threshold, max_flows.maxflow_10day_cfs, arrival_time.below_bank_return_hour, arrival_time.below_bank_return_time, channels.geom; \ No newline at end of file +GROUP BY forecasts.feature_id, forecasts.reference_time, forecasts.nwm_vers, forecasts.streamflow, channels.name, channels.strm_order, channels.huc6, channels.state, rf.high_water_threshold, max_flows.discharge_cfs, arrival_time.below_bank_return_hour, arrival_time.below_bank_return_time, channels.geom; \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/medium_range_mem1/mrf_gfs_10day_rapid_onset_flooding.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/medium_range_mem1/mrf_gfs_10day_rapid_onset_flooding.sql index a433980a..63149eb1 100644 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/medium_range_mem1/mrf_gfs_10day_rapid_onset_flooding.sql +++ 
b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/medium_range_mem1/mrf_gfs_10day_rapid_onset_flooding.sql @@ -10,7 +10,7 @@ WITH rapid_onset AS ( ( WITH series AS -- Calculate a full 240 hour series for every feature_id, so that unadjacent hours aren't compared (SELECT channels.feature_id, generate_series(3,240,3) AS forecast_hour - FROM derived.channels_conus channels JOIN cache.max_flows_mrf_gfs as mf on channels.feature_id = mf.feature_id + FROM derived.channels_conus channels JOIN cache.max_flows_mrf_gfs_10day as mf on channels.feature_id = mf.feature_id WHERE channels.strm_order <= 4 ) SELECT series.feature_id, series.forecast_hour, CASE WHEN streamflow is NOT NULL THEN (streamflow * 35.315) ELSE 0.001 END AS streamflow -- Set streamflow to 0.01 in cases where it is missing, so we don't get a divide by zero error diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/replace_route/rfc_based_5day_max_streamflow.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/replace_route/rfc_based_5day_max_streamflow.sql index b2344440..664b1157 100644 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/replace_route/rfc_based_5day_max_streamflow.sql +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/replace_route/rfc_based_5day_max_streamflow.sql @@ -9,8 +9,8 @@ max_flows_station_xwalk AS ( station.rfc_defined_fcst_point, mf.reference_time, mf.time_of_max, - mf.maxflow_5day_cfs as streamflow, - mf.maxflow_5day_cms as streamflow_cms, + mf.discharge_cfs as streamflow, + mf.discharge_cms as streamflow_cms, rl.downstream_feature_id, rl.stream_order, rl.stream_length, diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/short_range/srf_18hr_max_high_flow_magnitude.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/short_range/srf_18hr_max_high_flow_magnitude.sql index fa7c256f..d2544fa2 100644 --- 
a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/short_range/srf_18hr_max_high_flow_magnitude.sql +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/short_range/srf_18hr_max_high_flow_magnitude.sql @@ -1,16 +1,16 @@ DROP TABLE IF EXISTS publish.srf_18hr_max_high_flow_magnitude; WITH high_flow_mag AS ( SELECT maxflows.feature_id, - maxflows.maxflow_18hour_cfs AS max_flow, + maxflows.discharge_cfs AS max_flow, maxflows.nwm_vers, maxflows.reference_time, CASE - WHEN maxflows.maxflow_18hour_cfs >= thresholds.rf_50_0_17c THEN '2'::text - WHEN maxflows.maxflow_18hour_cfs >= thresholds.rf_25_0_17c THEN '4'::text - WHEN maxflows.maxflow_18hour_cfs >= thresholds.rf_10_0_17c THEN '10'::text - WHEN maxflows.maxflow_18hour_cfs >= thresholds.rf_5_0_17c THEN '20'::text - WHEN maxflows.maxflow_18hour_cfs >= thresholds.rf_2_0_17c THEN '50'::text - WHEN maxflows.maxflow_18hour_cfs >= thresholds.high_water_threshold THEN '>50'::text + WHEN maxflows.discharge_cfs >= thresholds.rf_50_0_17c THEN '2'::text + WHEN maxflows.discharge_cfs >= thresholds.rf_25_0_17c THEN '4'::text + WHEN maxflows.discharge_cfs >= thresholds.rf_10_0_17c THEN '10'::text + WHEN maxflows.discharge_cfs >= thresholds.rf_5_0_17c THEN '20'::text + WHEN maxflows.discharge_cfs >= thresholds.rf_2_0_17c THEN '50'::text + WHEN maxflows.discharge_cfs >= thresholds.high_water_threshold THEN '>50'::text ELSE NULL::text END AS recur_cat, thresholds.high_water_threshold AS high_water_threshold, @@ -21,7 +21,7 @@ WITH high_flow_mag AS ( thresholds.rf_50_0_17c AS flow_50yr FROM cache.max_flows_srf maxflows JOIN derived.recurrence_flows_conus thresholds ON maxflows.feature_id = thresholds.feature_id - WHERE thresholds.high_water_threshold > 0::double precision AND maxflows.maxflow_18hour_cfs >= thresholds.high_water_threshold + WHERE thresholds.high_water_threshold > 0::double precision AND maxflows.discharge_cfs >= thresholds.high_water_threshold ) SELECT channels.feature_id, channels.feature_id::TEXT AS 
feature_id_str, diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/short_range/srf_18hr_peak_flow_arrival_time.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/short_range/srf_18hr_peak_flow_arrival_time.sql index 0b87dc9f..b69523bd 100644 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/short_range/srf_18hr_peak_flow_arrival_time.sql +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/short_range/srf_18hr_peak_flow_arrival_time.sql @@ -13,7 +13,7 @@ SELECT CASE WHEN rf.high_water_threshold = -9999 THEN NULL ELSE to_char(forecasts.reference_time::timestamp without time zone + INTERVAL '1 hour' * min(forecast_hour), 'YYYY-MM-DD HH24:MI:SS UTC') END AS peak_flow_arrival_time, CASE WHEN rf.high_water_threshold = -9999 THEN NULL ELSE arrival_time.below_bank_return_hour END AS below_bank_return_hour, CASE WHEN rf.high_water_threshold = -9999 THEN NULL ELSE arrival_time.below_bank_return_time END AS below_bank_return_time, - round((max_flows.maxflow_18hour_cms*35.315)::numeric, 2) AS max_flow_cfs, + round(max_flows.discharge_cfs::numeric, 2) AS max_flow_cfs, rf.high_water_threshold, to_char(now()::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS update_time, channels.geom @@ -23,7 +23,7 @@ FROM ingest.nwm_channel_rt_srf AS forecasts -- Join in max flows on max streamflow to only get peak flows JOIN cache.max_flows_srf AS max_flows - ON forecasts.feature_id = max_flows.feature_id AND forecasts.streamflow = max_flows.maxflow_18hour_cms + ON forecasts.feature_id = max_flows.feature_id AND forecasts.streamflow = max_flows.discharge_cms -- Join in channels data to get reach metadata and geometry JOIN derived.channels_conus AS channels ON forecasts.feature_id = channels.feature_id @@ -35,4 +35,4 @@ JOIN derived.recurrence_flows_conus AS rf ON forecasts.feature_id = rf.feature_i JOIN publish.srf_18hr_high_water_arrival_time AS arrival_time ON forecasts.feature_id = arrival_time.feature_id and 
forecasts.reference_time = arrival_time.reference_time WHERE round((forecasts.streamflow*35.315)::numeric, 2) >= rf.high_water_threshold -GROUP BY forecasts.feature_id, forecasts.reference_time, forecasts.nwm_vers, channels.name, channels.strm_order, channels.huc6, channels.state, rf.high_water_threshold, arrival_time.below_bank_return_hour, arrival_time.below_bank_return_time, max_flows.maxflow_18hour_cms, channels.geom \ No newline at end of file +GROUP BY forecasts.feature_id, forecasts.reference_time, forecasts.nwm_vers, channels.name, channels.strm_order, channels.huc6, channels.state, rf.high_water_threshold, arrival_time.below_bank_return_hour, arrival_time.below_bank_return_time, max_flows.discharge_cfs, channels.geom \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/short_range/srf_18hr_rate_of_change.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/short_range/srf_18hr_rate_of_change.sql index 5d9b7c04..13dd5308 100644 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/short_range/srf_18hr_rate_of_change.sql +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/short_range/srf_18hr_rate_of_change.sql @@ -4,7 +4,7 @@ WITH roi AS ( SELECT max_srf.feature_id, thresholds.high_water_threshold FROM cache.max_flows_srf AS max_srf JOIN derived.recurrence_flows_conus thresholds ON max_srf.feature_id = thresholds.feature_id - AND max_srf.maxflow_18hour_cfs >= thresholds.high_water_threshold + AND max_srf.discharge_cfs >= thresholds.high_water_threshold ) SELECT channels.feature_id, @@ -17,11 +17,11 @@ SELECT srf.forecast_hour, srf.nwm_vers, srf.reference_time, - ana.maxflow_1hour_cfs as current_flow, + ana.discharge_cfs as current_flow, round((srf.streamflow * 35.315)::numeric, 2) as forecast_flow, roi.high_water_threshold, - round(((srf.streamflow * 35.315) - ana.maxflow_1hour_cfs)::numeric, 2) as change_cfs, - round((((srf.streamflow * 35.315) - 
ana.maxflow_1hour_cfs)*100/ana.maxflow_1hour_cfs)::numeric, 2) as change_perc, + round(((srf.streamflow * 35.315) - ana.discharge_cfs)::numeric, 2) as change_cfs, + round((((srf.streamflow * 35.315) - ana.discharge_cfs)*100/ana.discharge_cfs)::numeric, 2) as change_perc, to_char(now()::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS update_time INTO publish.srf_18hr_rate_of_change FROM ingest.nwm_channel_rt_srf as srf diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/short_range_alaska/srf_15hr_peak_flow_arrival_time_alaska.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/short_range_alaska/srf_15hr_peak_flow_arrival_time_alaska.sql index 90c633ff..1754d7a4 100644 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/short_range_alaska/srf_15hr_peak_flow_arrival_time_alaska.sql +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/short_range_alaska/srf_15hr_peak_flow_arrival_time_alaska.sql @@ -16,7 +16,7 @@ SELECT to_char(forecasts.reference_time::timestamp without time zone + INTERVAL '1 hour' * min(forecast_hour), 'YYYY-MM-DD HH24:MI:SS UTC') AS peak_flow_arrival_time, arrival_time.below_bank_return_hour, to_char(forecasts.reference_time::timestamp without time zone + INTERVAL '1 hour' * arrival_time.below_bank_return_hour, 'YYYY-MM-DD HH24:MI:SS UTC') AS below_bank_return_time, - round((max_flows.maxflow_15hour_cms*35.315)::numeric, 2) AS max_flow_cfs, + round(max_flows.discharge_cfs::numeric, 2) AS max_flow_cfs, to_char(now()::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS update_time, channels.strm_order::integer, channels.name, @@ -29,7 +29,7 @@ FROM ingest.nwm_channel_rt_srf_ak AS forecasts -- Join in max flows on max streamflow to only get peak flows JOIN cache.max_flows_srf_ak AS max_flows - ON forecasts.feature_id = max_flows.feature_id AND forecasts.streamflow = max_flows.maxflow_15hour_cms + ON forecasts.feature_id = max_flows.feature_id AND forecasts.streamflow = 
max_flows.discharge_cms -- Join in channels data to get reach metadata and geometry JOIN derived.channels_alaska as channels ON forecasts.feature_id = channels.feature_id::bigint @@ -37,4 +37,4 @@ JOIN derived.channels_alaska as channels ON forecasts.feature_id = channels.feat -- Join in arrival_time query results JOIN arrival_time ON forecasts.feature_id = arrival_time.feature_id -GROUP BY forecasts.feature_id, forecasts.reference_time, forecasts.nwm_vers, arrival_time.below_bank_return_hour, max_flows.maxflow_15hour_cms, channels.geom, channels.strm_order, channels.name, channels.huc6; \ No newline at end of file +GROUP BY forecasts.feature_id, forecasts.reference_time, forecasts.nwm_vers, arrival_time.below_bank_return_hour, max_flows.discharge_cfs, channels.geom, channels.strm_order, channels.name, channels.huc6; \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/short_range_hawaii/srf_48hr_max_high_flow_magnitude_hi.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/short_range_hawaii/srf_48hr_max_high_flow_magnitude_hi.sql index 80fc309e..f4669858 100644 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/short_range_hawaii/srf_48hr_max_high_flow_magnitude_hi.sql +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/short_range_hawaii/srf_48hr_max_high_flow_magnitude_hi.sql @@ -1,17 +1,17 @@ DROP TABLE IF EXISTS publish.srf_48hr_max_high_flow_magnitude_hi; WITH high_flow_mag AS ( SELECT maxflows.feature_id, - maxflows.maxflow_48hour_cfs AS max_flow, + maxflows.discharge_cfs AS max_flow, maxflows.nwm_vers, maxflows.reference_time, CASE WHEN thresholds.high_water_threshold = '-9999'::integer::double precision THEN 'Not Available'::text - WHEN maxflows.maxflow_48hour_cfs >= thresholds.rf_100_0 THEN '1'::text - WHEN maxflows.maxflow_48hour_cfs >= thresholds.rf_50_0 THEN '2'::text - WHEN maxflows.maxflow_48hour_cfs >= thresholds.rf_25_0 THEN '4'::text - WHEN 
maxflows.maxflow_48hour_cfs >= thresholds.rf_10_0 THEN '10'::text - WHEN maxflows.maxflow_48hour_cfs >= thresholds.rf_5_0 THEN '20'::text - WHEN maxflows.maxflow_48hour_cfs >= thresholds.high_water_threshold THEN '>20'::text + WHEN maxflows.discharge_cfs >= thresholds.rf_100_0 THEN '1'::text + WHEN maxflows.discharge_cfs >= thresholds.rf_50_0 THEN '2'::text + WHEN maxflows.discharge_cfs >= thresholds.rf_25_0 THEN '4'::text + WHEN maxflows.discharge_cfs >= thresholds.rf_10_0 THEN '10'::text + WHEN maxflows.discharge_cfs >= thresholds.rf_5_0 THEN '20'::text + WHEN maxflows.discharge_cfs >= thresholds.high_water_threshold THEN '>20'::text ELSE NULL::text END AS recur_cat, thresholds.high_water_threshold AS high_water_threshold, @@ -23,7 +23,7 @@ WITH high_flow_mag AS ( thresholds.rf_100_0 AS flow_100yr FROM cache.max_flows_srf_hi maxflows JOIN derived.recurrence_flows_hi thresholds ON maxflows.feature_id = thresholds.feature_id - WHERE thresholds.high_water_threshold > 0::double precision AND maxflows.maxflow_48hour_cfs >= thresholds.high_water_threshold + WHERE thresholds.high_water_threshold > 0::double precision AND maxflows.discharge_cfs >= thresholds.high_water_threshold ) SELECT channels.feature_id, channels.feature_id::TEXT AS feature_id_str, diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/short_range_hawaii/srf_48hr_peak_flow_arrival_time_hi.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/short_range_hawaii/srf_48hr_peak_flow_arrival_time_hi.sql index 280721d2..fe15672b 100644 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/short_range_hawaii/srf_48hr_peak_flow_arrival_time_hi.sql +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/short_range_hawaii/srf_48hr_peak_flow_arrival_time_hi.sql @@ -13,7 +13,7 @@ SELECT CASE WHEN rf.high_water_threshold = -9999 THEN NULL ELSE to_char(forecasts.reference_time::timestamp without time zone + INTERVAL '1 hour' * min(forecast_hour), 'YYYY-MM-DD HH24:MI:SS 
UTC') END AS peak_flow_arrival_time, CASE WHEN rf.high_water_threshold = -9999 THEN NULL ELSE arrival_time.below_bank_return_hour END AS below_bank_return_hour, CASE WHEN rf.high_water_threshold = -9999 THEN NULL ELSE arrival_time.below_bank_return_time END AS below_bank_return_time, - round((max_flows.maxflow_48hour_cms*35.315)::numeric, 2) AS max_flow_cfs, + round(max_flows.discharge_cfs::numeric, 2) AS max_flow_cfs, rf.high_water_threshold, to_char(now()::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS update_time, channels.geom @@ -23,7 +23,7 @@ FROM ingest.nwm_channel_rt_srf_hi AS forecasts -- Join in max flows on max streamflow to only get peak flows JOIN cache.max_flows_srf_hi AS max_flows - ON forecasts.feature_id = max_flows.feature_id AND forecasts.streamflow = max_flows.maxflow_48hour_cms + ON forecasts.feature_id = max_flows.feature_id AND forecasts.streamflow = max_flows.discharge_cms -- Join in channels data to get reach metadata JOIN derived.channels_hi as channels ON forecasts.feature_id = channels.feature_id @@ -35,7 +35,7 @@ JOIN derived.recurrence_flows_hi as rf ON forecasts.feature_id = rf.feature_id JOIN publish.srf_48hr_high_water_arrival_time_hi as arrival_time ON forecasts.feature_id = arrival_time.feature_id and forecasts.reference_time = arrival_time.reference_time WHERE (rf.high_water_threshold > 0 OR rf.high_water_threshold = '-9999') AND forecasts.streamflow * 35.315::double precision >= rf.high_water_threshold -GROUP BY forecasts.feature_id, forecasts.reference_time, forecasts.nwm_vers, channels.name, channels.strm_order, channels.huc6, rf.high_water_threshold, arrival_time.below_bank_return_hour, arrival_time.below_bank_return_time, max_flows.maxflow_48hour_cms, channels.geom; +GROUP BY forecasts.feature_id, forecasts.reference_time, forecasts.nwm_vers, channels.name, channels.strm_order, channels.huc6, rf.high_water_threshold, arrival_time.below_bank_return_hour, arrival_time.below_bank_return_time, 
max_flows.discharge_cms, channels.geom; --Add an empty row so that service monitor will pick up a reference and update time in the event of no fim features INSERT INTO publish.srf_48hr_peak_flow_arrival_time_hi( diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/short_range_puertorico/srf_48hr_max_high_flow_magnitude_prvi.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/short_range_puertorico/srf_48hr_max_high_flow_magnitude_prvi.sql index ef58d8d2..91d31c0f 100644 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/short_range_puertorico/srf_48hr_max_high_flow_magnitude_prvi.sql +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/short_range_puertorico/srf_48hr_max_high_flow_magnitude_prvi.sql @@ -1,17 +1,17 @@ DROP TABLE IF EXISTS publish.srf_48hr_max_high_flow_magnitude_prvi; WITH high_flow_mag AS ( SELECT maxflows.feature_id, - maxflows.maxflow_48hour_cfs AS max_flow, + maxflows.discharge_cfs AS max_flow, maxflows.nwm_vers, maxflows.reference_time, CASE WHEN thresholds.high_water_threshold = '-9999'::integer::double precision THEN 'Not Available'::text - WHEN maxflows.maxflow_48hour_cfs >= thresholds.rf_100_0 THEN '1'::text - WHEN maxflows.maxflow_48hour_cfs >= thresholds.rf_50_0 THEN '2'::text - WHEN maxflows.maxflow_48hour_cfs >= thresholds.rf_25_0 THEN '4'::text - WHEN maxflows.maxflow_48hour_cfs >= thresholds.rf_10_0 THEN '10'::text - WHEN maxflows.maxflow_48hour_cfs >= thresholds.rf_5_0 THEN '20'::text - WHEN maxflows.maxflow_48hour_cfs >= thresholds.high_water_threshold THEN '>20'::text + WHEN maxflows.discharge_cfs >= thresholds.rf_100_0 THEN '1'::text + WHEN maxflows.discharge_cfs >= thresholds.rf_50_0 THEN '2'::text + WHEN maxflows.discharge_cfs >= thresholds.rf_25_0 THEN '4'::text + WHEN maxflows.discharge_cfs >= thresholds.rf_10_0 THEN '10'::text + WHEN maxflows.discharge_cfs >= thresholds.rf_5_0 THEN '20'::text + WHEN maxflows.discharge_cfs >= thresholds.high_water_threshold THEN '>20'::text 
ELSE NULL::text END AS recur_cat, thresholds.high_water_threshold AS high_water_threshold, @@ -23,7 +23,7 @@ WITH high_flow_mag AS ( thresholds.rf_100_0 AS flow_100yr FROM cache.max_flows_srf_prvi maxflows JOIN derived.recurrence_flows_prvi thresholds ON maxflows.feature_id = thresholds.feature_id - WHERE thresholds.high_water_threshold > 0::double precision AND maxflows.maxflow_48hour_cfs >= thresholds.high_water_threshold + WHERE thresholds.high_water_threshold > 0::double precision AND maxflows.discharge_cfs >= thresholds.high_water_threshold ) SELECT channels.feature_id, channels.feature_id::TEXT AS feature_id_str, diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/short_range_puertorico/srf_48hr_peak_flow_arrival_time_prvi.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/short_range_puertorico/srf_48hr_peak_flow_arrival_time_prvi.sql index 4bce4924..5bff9870 100644 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/short_range_puertorico/srf_48hr_peak_flow_arrival_time_prvi.sql +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/products/short_range_puertorico/srf_48hr_peak_flow_arrival_time_prvi.sql @@ -13,7 +13,7 @@ SELECT CASE WHEN rf.high_water_threshold = -9999 THEN NULL ELSE to_char(forecasts.reference_time::timestamp without time zone + INTERVAL '1 hour' * min(forecast_hour), 'YYYY-MM-DD HH24:MI:SS UTC') END AS peak_flow_arrival_time, CASE WHEN rf.high_water_threshold = -9999 THEN NULL ELSE arrival_time.below_bank_return_hour END AS below_bank_return_hour, CASE WHEN rf.high_water_threshold = -9999 THEN NULL ELSE arrival_time.below_bank_return_time END AS below_bank_return_time, - round((max_flows.maxflow_48hour_cms*35.315)::numeric, 2) AS max_flow_cfs, + round(max_flows.discharge_cfs::numeric, 2) AS max_flow_cfs, rf.high_water_threshold, to_char(now()::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS update_time, channels.geom @@ -23,7 +23,7 @@ FROM ingest.nwm_channel_rt_srf_prvi AS 
forecasts -- Join in max flows on max streamflow to only get peak flows JOIN cache.max_flows_srf_prvi AS max_flows - ON forecasts.feature_id = max_flows.feature_id AND forecasts.streamflow = max_flows.maxflow_48hour_cms + ON forecasts.feature_id = max_flows.feature_id AND forecasts.streamflow = max_flows.discharge_cms -- Join in channels data to get reach metadata JOIN derived.channels_prvi as channels ON forecasts.feature_id = channels.feature_id @@ -35,7 +35,7 @@ JOIN derived.recurrence_flows_prvi as rf ON forecasts.feature_id = rf.feature_id JOIN publish.srf_48hr_high_water_arrival_time_prvi as arrival_time ON forecasts.feature_id = arrival_time.feature_id and forecasts.reference_time = arrival_time.reference_time WHERE round((forecasts.streamflow*35.315)::numeric, 2) >= rf.high_water_threshold -GROUP BY forecasts.feature_id, forecasts.reference_time, forecasts.nwm_vers, channels.name, channels.strm_order, channels.huc6, rf.high_water_threshold, arrival_time.below_bank_return_hour, arrival_time.below_bank_return_time, max_flows.maxflow_48hour_cms, channels.geom; +GROUP BY forecasts.feature_id, forecasts.reference_time, forecasts.nwm_vers, channels.name, channels.strm_order, channels.huc6, rf.high_water_threshold, arrival_time.below_bank_return_hour, arrival_time.below_bank_return_time, max_flows.discharge_cms, channels.geom; --Add an empty row so that service monitor will pick up a reference and update time in the event of no fim features INSERT INTO publish.srf_48hr_peak_flow_arrival_time_prvi( diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/readme.md b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/readme.md new file mode 100644 index 00000000..90641b10 --- /dev/null +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/readme.md @@ -0,0 +1,32 @@ +The postprocess_sql lambda function is a generalized lightweight lambda function used throughout VPP pipelines to execute SQL on a specified database (PostgreSQL RDS or Redshift) +It is typically 
used for operations like: + - Ingest prep: Truncate a table and delete indices prior to ingesting new data. Create a table if it doesn't exist yet. + - Ingest finish: Rebuild indices after ingest. + - Database transformations: Query and/or aggregate ingested data into new table for a map service + - Various steps of FIM Workflows (details below): With the implementation of FIM Caching to a Redshift Data Warehouse in HydroVIS version 2.2(?), this function can now be used to execute template SQL files as well. The template to use is defined by the input parameters in the step function, and generally does one of these things: + - Create a table if it doesn't exist + - Truncate an inundation table for a new pipeline run + - Copy data from RDS to Redshift or vice versa (via Foreign Data Wrappers and External Schemas) + - Data transformations / aggregations. + +############################################# FIM Workflow ############################################## +The following database operations are taken below to process FIM, largely via the fim_caching_templates sql files in this lambda function +0. Create four tables, if they don't already exist, on both RDS and Redshift. 
These tables replicate the schema of the HAND cache on Redshift, and are truncated and re-populated as part of each FIM run: + - ingest.{fim_config}_flows - this is a version of max_flows, with fim crosswalk columns added, as well as filtering for high water threshold + - ingest.{fim_config} - this is the fim table, but without geometry + - ingest.{fim_config}_geo - this is the geometries for the fim table (one-to-many, since we're subdividing to keep geometries small for Redshift) + - ingest.{fim_config}_zero_stage - this table holds all of the fim features (hydro_table, feature_id, huc8, branch combinations) that have zero or NaN stage at the current discharge value + - ingest.{fim_config}_geo_view (RDS only) - this view subdivides the newly-created polygons in the inundation_geo table (because Redshift has a limit on the size of geometries) + - publish.{fim_config} (RDS only) - This is the finished publish table that gets copied to the EGIS service +1. Populate the FIM flows table on RDS (from max_flows with some joins), then copy it to Redshift +2. Query the HAND Cache on Redshift + a. Query the HAND cache on Redshift, joining to the just-populated flows table, to populate the inundation, inundation_geo, and inundation_zero_stage tables on Redshift +3. Populate the inundation tables on RDS + a. Prioritize Ras2FIM by querying the Ras2FIM cache on RDS first #TODO + b. Copy the FIM tables on Redshift (which were just populated from the HAND cache in 2a) into the inundation tables on RDS (skipping any records that were already added from Ras2FIM) + c. HAND processing for any FIM features remaining in the inundation flows table, that have not been added to the inundation table from Ras2FIM or the HAND cache (not done here, but administered by the fim_data_prep lambda function) +4. Generate publish.inundation table on RDS, and copy it to the EGIS (done via the update_egis_data function) + a. 
We can use a template to do this generically for most inland inundation configurations (e.g. NWM) +5. Add any newly generated HAND features in this run into the Redshift HAND cache ( #TODO: it would be good to figure out how to do this in parallel outside of the fim_config map, so that this doesn't hold things up). + a. Insert records from the RDS inundation, inundation_geo, and inundation_zero_stage tables/view into the Redshift HAND cache tables, only taking records generated by HAND Processing, and which the primary key does not already exist (hydro_id, feature_id, huc8, branch, rc_stage_ft) + \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/summaries/ana_inundation/building_footprints_fimpact.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/summaries/ana_inundation/building_footprints_fimpact.sql index 58785246..e32082b2 100644 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/summaries/ana_inundation/building_footprints_fimpact.sql +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/summaries/ana_inundation/building_footprints_fimpact.sql @@ -18,7 +18,7 @@ SELECT fim.feature_id, fim.feature_id_str::TEXT AS feature_id_str, fim.streamflow_cfs, - fim.fim_stage_ft, + fim.rc_stage_ft as fim_stage_ft, buildings.geom, ST_Centroid(buildings.geom) as geom_xy INTO publish.ana_inundation_building_footprints @@ -33,8 +33,8 @@ SELECT buildings.prop_st as state, max(fim.streamflow_cfs) AS max_flow_cfs, avg(fim.streamflow_cfs) AS avg_flow_cfs, - max(fim.fim_stage_ft) AS max_fim_stage_ft, - avg(fim.fim_stage_ft) AS avg_fim_stage_ft, + max(fim.rc_stage_ft) AS max_fim_stage_ft, + avg(fim.rc_stage_ft) AS avg_fim_stage_ft, count(buildings.build_id) AS buildings_impacted, sum(buildings.sqfeet) AS building_sqft_impacted, sum(CASE WHEN buildings.occ_cls = 'Agriculture' THEN 1 ELSE 0 END) AS bldgs_agriculture, @@ -63,8 +63,8 @@ SELECT TO_CHAR(hucs.huc10, 'fm0000000000') AS huc10_str, max(fim.streamflow_cfs) AS max_flow_cfs, 
avg(fim.streamflow_cfs) AS avg_flow_cfs, - max(fim.fim_stage_ft) AS max_fim_stage_ft, - avg(fim.fim_stage_ft) AS avg_fim_stage_ft, + max(fim.rc_stage_ft) AS max_fim_stage_ft, + avg(fim.rc_stage_ft) AS avg_fim_stage_ft, count(buildings.build_id) AS buildings_impacted, sum(buildings.sqfeet) AS building_sqft_impacted, sum(CASE WHEN buildings.occ_cls = 'Agriculture' THEN 1 ELSE 0 END) AS bldgs_agriculture, diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/summaries/ana_inundation/src_skill.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/summaries/ana_inundation/src_skill.sql index d1513f08..26912124 100644 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/summaries/ana_inundation/src_skill.sql +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/summaries/ana_inundation/src_skill.sql @@ -6,20 +6,20 @@ SELECT ht.feature_id, ht.feature_id::text as feature_id_str, to_char('1900-01-01 00:00:00'::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS reference_time, - maxflow_1hour_cfs, - MIN(ht.elevation_ft) + ((maxflow_1hour_cfs - MIN(ht.discharge_cfs)) * ((MAX(ht.next_elevation_ft) - MIN(ht.elevation_ft)) / (MAX(ht.next_discharge_cfs) - MIN(ht.discharge_cfs)))) as synth_interp_elevation_ft, - MIN(urc.elevation_ft) + ((maxflow_1hour_cfs - MIN(urc.discharge_cfs)) * ((MAX(urc.next_elevation_ft) - MIN(urc.elevation_ft)) / (MAX(urc.next_discharge_cfs) - MIN(urc.discharge_cfs)))) as usgs_interp_elevation_ft, - MIN(ht.elevation_ft) + ((maxflow_1hour_cfs - MIN(ht.discharge_cfs)) * ((MAX(ht.next_elevation_ft) - MIN(ht.elevation_ft)) / (MAX(ht.next_discharge_cfs) - MIN(ht.discharge_cfs)))) - - MIN(urc.elevation_ft) + ((maxflow_1hour_cfs - MIN(urc.discharge_cfs)) * ((MAX(urc.next_elevation_ft) - MIN(urc.elevation_ft)) / (MAX(urc.next_discharge_cfs) - MIN(urc.discharge_cfs)))) as diff_ft, + ana.discharge_cfs AS maxflow_1hour_cfs, + MIN(ht.elevation_ft) + ((ana.discharge_cfs - MIN(ht.discharge_cfs)) * ((MAX(ht.next_elevation_ft) - MIN(ht.elevation_ft)) 
/ (MAX(ht.next_discharge_cfs) - MIN(ht.discharge_cfs)))) as synth_interp_elevation_ft, + MIN(urc.elevation_ft) + ((ana.discharge_cfs - MIN(urc.discharge_cfs)) * ((MAX(urc.next_elevation_ft) - MIN(urc.elevation_ft)) / (MAX(urc.next_discharge_cfs) - MIN(urc.discharge_cfs)))) as usgs_interp_elevation_ft, + MIN(ht.elevation_ft) + ((ana.discharge_cfs - MIN(ht.discharge_cfs)) * ((MAX(ht.next_elevation_ft) - MIN(ht.elevation_ft)) / (MAX(ht.next_discharge_cfs) - MIN(ht.discharge_cfs)))) - + MIN(urc.elevation_ft) + ((ana.discharge_cfs - MIN(urc.discharge_cfs)) * ((MAX(urc.next_elevation_ft) - MIN(urc.elevation_ft)) / (MAX(urc.next_discharge_cfs) - MIN(urc.discharge_cfs)))) as diff_ft, MIN(navd88_datum) as navd88_datum, MIN(stage) as usgs_stage, ST_TRANSFORM(MIN(gage.geo_point), 3857) as geom INTO publish.ana_inundation_src_skill FROM cache.max_flows_ana AS ana -JOIN derived.recurrence_flows_conus thresholds ON ana.feature_id = thresholds.feature_id AND ana.maxflow_1hour_cfs >= thresholds.high_water_threshold -JOIN derived.hydrotable_staggered AS ht ON ht.feature_id = ana.feature_id AND ana.maxflow_1hour_cfs >= ht.discharge_cfs AND ana.maxflow_1hour_cfs <= ht.next_discharge_cfs -JOIN derived.usgs_rating_curves_staggered AS urc ON urc.location_id::text = ht.location_id AND ana.maxflow_1hour_cfs >= urc.discharge_cfs AND ana.maxflow_1hour_cfs <= urc.next_discharge_cfs +JOIN derived.recurrence_flows_conus thresholds ON ana.feature_id = thresholds.feature_id AND ana.discharge_cfs >= thresholds.high_water_threshold +JOIN derived.hydrotable_staggered AS ht ON ht.feature_id = ana.feature_id AND ana.discharge_cfs >= ht.discharge_cfs AND ana.discharge_cfs <= ht.next_discharge_cfs +JOIN derived.usgs_rating_curves_staggered AS urc ON urc.location_id::text = ht.location_id AND ana.discharge_cfs >= urc.discharge_cfs AND ana.discharge_cfs <= urc.next_discharge_cfs JOIN external.usgs_gage AS gage ON LPAD(gage.usgs_gage_id::text, 8, '0') = LPAD(ht.location_id::text, 8, '0') -GROUP BY 
urc.location_id, ht.feature_id, maxflow_1hour_cfs; +GROUP BY urc.location_id, ht.feature_id, ana.discharge_cfs; diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/summaries/ana_past_14day_max_inundation/14day_src_skill.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/summaries/ana_past_14day_max_inundation/14day_src_skill.sql index fc334286..e1fb5519 100644 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/summaries/ana_past_14day_max_inundation/14day_src_skill.sql +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/summaries/ana_past_14day_max_inundation/14day_src_skill.sql @@ -6,20 +6,20 @@ SELECT ht.feature_id, ht.feature_id::text as feature_id_str, to_char('1900-01-01 00:00:00'::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS reference_time, - max_flow_14day_cfs, - MIN(ht.elevation_ft) + ((max_flow_14day_cfs - MIN(ht.discharge_cfs)) * ((MAX(ht.next_elevation_ft) - MIN(ht.elevation_ft)) / (MAX(ht.next_discharge_cfs) - MIN(ht.discharge_cfs)))) as synth_interp_elevation_ft, - MIN(urc.elevation_ft) + ((max_flow_14day_cfs - MIN(urc.discharge_cfs)) * ((MAX(urc.next_elevation_ft) - MIN(urc.elevation_ft)) / (MAX(urc.next_discharge_cfs) - MIN(urc.discharge_cfs)))) as usgs_interp_elevation_ft, - MIN(ht.elevation_ft) + ((max_flow_14day_cfs - MIN(ht.discharge_cfs)) * ((MAX(ht.next_elevation_ft) - MIN(ht.elevation_ft)) / (MAX(ht.next_discharge_cfs) - MIN(ht.discharge_cfs)))) - - MIN(urc.elevation_ft) + ((max_flow_14day_cfs - MIN(urc.discharge_cfs)) * ((MAX(urc.next_elevation_ft) - MIN(urc.elevation_ft)) / (MAX(urc.next_discharge_cfs) - MIN(urc.discharge_cfs)))) as diff_ft, + ana.discharge_cfs AS max_flow_14day_cfs, + MIN(ht.elevation_ft) + ((ana.discharge_cfs - MIN(ht.discharge_cfs)) * ((MAX(ht.next_elevation_ft) - MIN(ht.elevation_ft)) / (MAX(ht.next_discharge_cfs) - MIN(ht.discharge_cfs)))) as synth_interp_elevation_ft, + MIN(urc.elevation_ft) + ((ana.discharge_cfs - MIN(urc.discharge_cfs)) * ((MAX(urc.next_elevation_ft) - 
MIN(urc.elevation_ft)) / (MAX(urc.next_discharge_cfs) - MIN(urc.discharge_cfs)))) as usgs_interp_elevation_ft, + MIN(ht.elevation_ft) + ((ana.discharge_cfs - MIN(ht.discharge_cfs)) * ((MAX(ht.next_elevation_ft) - MIN(ht.elevation_ft)) / (MAX(ht.next_discharge_cfs) - MIN(ht.discharge_cfs)))) - + MIN(urc.elevation_ft) + ((ana.discharge_cfs - MIN(urc.discharge_cfs)) * ((MAX(urc.next_elevation_ft) - MIN(urc.elevation_ft)) / (MAX(urc.next_discharge_cfs) - MIN(urc.discharge_cfs)))) as diff_ft, MIN(navd88_datum) as navd88_datum, MIN(stage) as usgs_stage, ST_TRANSFORM(MIN(gage.geo_point), 3857) as geom INTO publish.ana_past_14day_max_inundation_src_skill FROM cache.max_flows_ana_14day AS ana -JOIN derived.recurrence_flows_conus thresholds ON ana.feature_id = thresholds.feature_id AND ana.max_flow_14day_cfs >= thresholds.high_water_threshold -JOIN derived.hydrotable_staggered AS ht ON ht.feature_id = ana.feature_id AND ana.max_flow_14day_cfs >= ht.discharge_cfs AND ana.max_flow_14day_cfs <= ht.next_discharge_cfs -JOIN derived.usgs_rating_curves_staggered AS urc ON urc.location_id::text = ht.location_id AND ana.max_flow_14day_cfs >= urc.discharge_cfs AND ana.max_flow_14day_cfs <= urc.next_discharge_cfs +JOIN derived.recurrence_flows_conus thresholds ON ana.feature_id = thresholds.feature_id AND ana.discharge_cfs >= thresholds.high_water_threshold +JOIN derived.hydrotable_staggered AS ht ON ht.feature_id = ana.feature_id AND ana.discharge_cfs >= ht.discharge_cfs AND ana.discharge_cfs <= ht.next_discharge_cfs +JOIN derived.usgs_rating_curves_staggered AS urc ON urc.location_id::text = ht.location_id AND ana.discharge_cfs >= urc.discharge_cfs AND ana.discharge_cfs <= urc.next_discharge_cfs JOIN external.usgs_gage AS gage ON LPAD(gage.usgs_gage_id::text, 8, '0') = LPAD(ht.location_id::text, 8, '0') -GROUP BY urc.location_id, ht.feature_id, max_flow_14day_cfs; +GROUP BY urc.location_id, ht.feature_id, ana.discharge_cfs; diff --git 
a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/summaries/ana_past_14day_max_inundation/7day_src_skill.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/summaries/ana_past_14day_max_inundation/7day_src_skill.sql index 68bd9eac..af62d98c 100644 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/summaries/ana_past_14day_max_inundation/7day_src_skill.sql +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/summaries/ana_past_14day_max_inundation/7day_src_skill.sql @@ -6,20 +6,20 @@ SELECT ht.feature_id, ht.feature_id::text as feature_id_str, to_char('1900-01-01 00:00:00'::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS reference_time, - max_flow_7day_cfs, - MIN(ht.elevation_ft) + ((max_flow_7day_cfs - MIN(ht.discharge_cfs)) * ((MAX(ht.next_elevation_ft) - MIN(ht.elevation_ft)) / (MAX(ht.next_discharge_cfs) - MIN(ht.discharge_cfs)))) as synth_interp_elevation_ft, - MIN(urc.elevation_ft) + ((max_flow_7day_cfs - MIN(urc.discharge_cfs)) * ((MAX(urc.next_elevation_ft) - MIN(urc.elevation_ft)) / (MAX(urc.next_discharge_cfs) - MIN(urc.discharge_cfs)))) as usgs_interp_elevation_ft, - MIN(ht.elevation_ft) + ((max_flow_7day_cfs - MIN(ht.discharge_cfs)) * ((MAX(ht.next_elevation_ft) - MIN(ht.elevation_ft)) / (MAX(ht.next_discharge_cfs) - MIN(ht.discharge_cfs)))) - - MIN(urc.elevation_ft) + ((max_flow_7day_cfs - MIN(urc.discharge_cfs)) * ((MAX(urc.next_elevation_ft) - MIN(urc.elevation_ft)) / (MAX(urc.next_discharge_cfs) - MIN(urc.discharge_cfs)))) as diff_ft, + ana.discharge_cfs AS max_flow_7day_cfs, + MIN(ht.elevation_ft) + ((ana.discharge_cfs - MIN(ht.discharge_cfs)) * ((MAX(ht.next_elevation_ft) - MIN(ht.elevation_ft)) / (MAX(ht.next_discharge_cfs) - MIN(ht.discharge_cfs)))) as synth_interp_elevation_ft, + MIN(urc.elevation_ft) + ((ana.discharge_cfs - MIN(urc.discharge_cfs)) * ((MAX(urc.next_elevation_ft) - MIN(urc.elevation_ft)) / (MAX(urc.next_discharge_cfs) - MIN(urc.discharge_cfs)))) as usgs_interp_elevation_ft, + MIN(ht.elevation_ft) + 
((ana.discharge_cfs - MIN(ht.discharge_cfs)) * ((MAX(ht.next_elevation_ft) - MIN(ht.elevation_ft)) / (MAX(ht.next_discharge_cfs) - MIN(ht.discharge_cfs)))) - + MIN(urc.elevation_ft) + ((ana.discharge_cfs - MIN(urc.discharge_cfs)) * ((MAX(urc.next_elevation_ft) - MIN(urc.elevation_ft)) / (MAX(urc.next_discharge_cfs) - MIN(urc.discharge_cfs)))) as diff_ft, MIN(navd88_datum) as navd88_datum, MIN(stage) as usgs_stage, ST_TRANSFORM(MIN(gage.geo_point), 3857) as geom INTO publish.ana_past_7day_max_inundation_src_skill FROM cache.max_flows_ana_7day AS ana -JOIN derived.recurrence_flows_conus thresholds ON ana.feature_id = thresholds.feature_id AND ana.max_flow_7day_cfs >= thresholds.high_water_threshold -JOIN derived.hydrotable_staggered AS ht ON ht.feature_id = ana.feature_id AND ana.max_flow_7day_cfs >= ht.discharge_cfs AND ana.max_flow_7day_cfs <= ht.next_discharge_cfs -JOIN derived.usgs_rating_curves_staggered AS urc ON urc.location_id::text = ht.location_id AND ana.max_flow_7day_cfs >= urc.discharge_cfs AND ana.max_flow_7day_cfs <= urc.next_discharge_cfs +JOIN derived.recurrence_flows_conus thresholds ON ana.feature_id = thresholds.feature_id AND ana.discharge_cfs >= thresholds.high_water_threshold +JOIN derived.hydrotable_staggered AS ht ON ht.feature_id = ana.feature_id AND ana.discharge_cfs >= ht.discharge_cfs AND ana.discharge_cfs <= ht.next_discharge_cfs +JOIN derived.usgs_rating_curves_staggered AS urc ON urc.location_id::text = ht.location_id AND ana.discharge_cfs >= urc.discharge_cfs AND ana.discharge_cfs <= urc.next_discharge_cfs JOIN external.usgs_gage AS gage ON LPAD(gage.usgs_gage_id::text, 8, '0') = LPAD(ht.location_id::text, 8, '0') -GROUP BY urc.location_id, ht.feature_id, max_flow_7day_cfs; +GROUP BY urc.location_id, ht.feature_id, ana.discharge_cfs; diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/summaries/mrf_gfs_10day_max_inundation/10day_src_skill.sql 
b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/summaries/mrf_gfs_10day_max_inundation/10day_src_skill.sql index 4014b9c1..7a565809 100644 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/summaries/mrf_gfs_10day_max_inundation/10day_src_skill.sql +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/summaries/mrf_gfs_10day_max_inundation/10day_src_skill.sql @@ -6,18 +6,18 @@ SELECT ht.feature_id, ht.feature_id::text as feature_id_str, to_char('1900-01-01 00:00:00'::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS reference_time, - maxflow_10day_cfs, - MIN(ht.elevation_ft) + ((maxflow_10day_cfs - MIN(ht.discharge_cfs)) * ((MAX(ht.next_elevation_ft) - MIN(ht.elevation_ft)) / (MAX(ht.next_discharge_cfs) - MIN(ht.discharge_cfs)))) as synth_interp_elevation_ft, - MIN(urc.elevation_ft) + ((maxflow_10day_cfs - MIN(urc.discharge_cfs)) * ((MAX(urc.next_elevation_ft) - MIN(urc.elevation_ft)) / (MAX(urc.next_discharge_cfs) - MIN(urc.discharge_cfs)))) as usgs_interp_elevation_ft, - MIN(ht.elevation_ft) + ((maxflow_10day_cfs - MIN(ht.discharge_cfs)) * ((MAX(ht.next_elevation_ft) - MIN(ht.elevation_ft)) / (MAX(ht.next_discharge_cfs) - MIN(ht.discharge_cfs)))) - - MIN(urc.elevation_ft) + ((maxflow_10day_cfs - MIN(urc.discharge_cfs)) * ((MAX(urc.next_elevation_ft) - MIN(urc.elevation_ft)) / (MAX(urc.next_discharge_cfs) - MIN(urc.discharge_cfs)))) as diff_ft, + mrf.discharge_cfs AS maxflow_10day_cfs, + MIN(ht.elevation_ft) + ((mrf.discharge_cfs - MIN(ht.discharge_cfs)) * ((MAX(ht.next_elevation_ft) - MIN(ht.elevation_ft)) / (MAX(ht.next_discharge_cfs) - MIN(ht.discharge_cfs)))) as synth_interp_elevation_ft, + MIN(urc.elevation_ft) + ((mrf.discharge_cfs - MIN(urc.discharge_cfs)) * ((MAX(urc.next_elevation_ft) - MIN(urc.elevation_ft)) / (MAX(urc.next_discharge_cfs) - MIN(urc.discharge_cfs)))) as usgs_interp_elevation_ft, + MIN(ht.elevation_ft) + ((mrf.discharge_cfs - MIN(ht.discharge_cfs)) * ((MAX(ht.next_elevation_ft) - MIN(ht.elevation_ft)) / 
(MAX(ht.next_discharge_cfs) - MIN(ht.discharge_cfs)))) - + MIN(urc.elevation_ft) + ((mrf.discharge_cfs - MIN(urc.discharge_cfs)) * ((MAX(urc.next_elevation_ft) - MIN(urc.elevation_ft)) / (MAX(urc.next_discharge_cfs) - MIN(urc.discharge_cfs)))) as diff_ft, MIN(navd88_datum) as navd88_datum, MIN(stage) as usgs_stage, ST_TRANSFORM(MIN(gage.geo_point), 3857) as geom INTO publish.mrf_gfs_max_inundation_10day_src_skill -FROM cache.max_flows_mrf_gfs AS mrf -JOIN derived.recurrence_flows_conus thresholds ON mrf.feature_id = thresholds.feature_id AND mrf.maxflow_10day_cfs >= thresholds.high_water_threshold -JOIN derived.hydrotable_staggered AS ht ON ht.feature_id = mrf.feature_id AND mrf.maxflow_10day_cfs >= ht.discharge_cfs AND mrf.maxflow_10day_cfs <= ht.next_discharge_cfs -JOIN derived.usgs_rating_curves_staggered AS urc ON urc.location_id::text = ht.location_id AND mrf.maxflow_10day_cfs >= urc.discharge_cfs AND mrf.maxflow_10day_cfs <= urc.next_discharge_cfs +FROM cache.max_flows_mrf_gfs_10day AS mrf +JOIN derived.recurrence_flows_conus thresholds ON mrf.feature_id = thresholds.feature_id AND mrf.discharge_cfs >= thresholds.high_water_threshold +JOIN derived.hydrotable_staggered AS ht ON ht.feature_id = mrf.feature_id AND mrf.discharge_cfs >= ht.discharge_cfs AND mrf.discharge_cfs <= ht.next_discharge_cfs +JOIN derived.usgs_rating_curves_staggered AS urc ON urc.location_id::text = ht.location_id AND mrf.discharge_cfs >= urc.discharge_cfs AND mrf.discharge_cfs <= urc.next_discharge_cfs JOIN external.usgs_gage AS gage ON LPAD(gage.usgs_gage_id::text, 8, '0') = LPAD(ht.location_id::text, 8, '0') -GROUP BY urc.location_id, ht.feature_id, maxflow_10day_cfs; \ No newline at end of file +GROUP BY urc.location_id, ht.feature_id, mrf.discharge_cfs; \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/summaries/mrf_gfs_10day_max_inundation/3day_src_skill.sql 
b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/summaries/mrf_gfs_10day_max_inundation/3day_src_skill.sql index df420942..b7dbfaeb 100644 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/summaries/mrf_gfs_10day_max_inundation/3day_src_skill.sql +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/summaries/mrf_gfs_10day_max_inundation/3day_src_skill.sql @@ -6,18 +6,18 @@ SELECT ht.feature_id, ht.feature_id::text as feature_id_str, to_char('1900-01-01 00:00:00'::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS reference_time, - maxflow_3day_cfs, - MIN(ht.elevation_ft) + ((maxflow_3day_cfs - MIN(ht.discharge_cfs)) * ((MAX(ht.next_elevation_ft) - MIN(ht.elevation_ft)) / (MAX(ht.next_discharge_cfs) - MIN(ht.discharge_cfs)))) as synth_interp_elevation_ft, - MIN(urc.elevation_ft) + ((maxflow_3day_cfs - MIN(urc.discharge_cfs)) * ((MAX(urc.next_elevation_ft) - MIN(urc.elevation_ft)) / (MAX(urc.next_discharge_cfs) - MIN(urc.discharge_cfs)))) as usgs_interp_elevation_ft, - MIN(ht.elevation_ft) + ((maxflow_3day_cfs - MIN(ht.discharge_cfs)) * ((MAX(ht.next_elevation_ft) - MIN(ht.elevation_ft)) / (MAX(ht.next_discharge_cfs) - MIN(ht.discharge_cfs)))) - - MIN(urc.elevation_ft) + ((maxflow_3day_cfs - MIN(urc.discharge_cfs)) * ((MAX(urc.next_elevation_ft) - MIN(urc.elevation_ft)) / (MAX(urc.next_discharge_cfs) - MIN(urc.discharge_cfs)))) as diff_ft, + mrf.discharge_cfs AS maxflow_3day_cfs, + MIN(ht.elevation_ft) + ((mrf.discharge_cfs - MIN(ht.discharge_cfs)) * ((MAX(ht.next_elevation_ft) - MIN(ht.elevation_ft)) / (MAX(ht.next_discharge_cfs) - MIN(ht.discharge_cfs)))) as synth_interp_elevation_ft, + MIN(urc.elevation_ft) + ((mrf.discharge_cfs - MIN(urc.discharge_cfs)) * ((MAX(urc.next_elevation_ft) - MIN(urc.elevation_ft)) / (MAX(urc.next_discharge_cfs) - MIN(urc.discharge_cfs)))) as usgs_interp_elevation_ft, + MIN(ht.elevation_ft) + ((mrf.discharge_cfs - MIN(ht.discharge_cfs)) * ((MAX(ht.next_elevation_ft) - MIN(ht.elevation_ft)) / 
(MAX(ht.next_discharge_cfs) - MIN(ht.discharge_cfs)))) - + MIN(urc.elevation_ft) + ((mrf.discharge_cfs - MIN(urc.discharge_cfs)) * ((MAX(urc.next_elevation_ft) - MIN(urc.elevation_ft)) / (MAX(urc.next_discharge_cfs) - MIN(urc.discharge_cfs)))) as diff_ft, MIN(navd88_datum) as navd88_datum, MIN(stage) as usgs_stage, ST_TRANSFORM(MIN(gage.geo_point), 3857) as geom INTO publish.mrf_gfs_max_inundation_3day_src_skill -FROM cache.max_flows_mrf_gfs AS mrf -JOIN derived.recurrence_flows_conus thresholds ON mrf.feature_id = thresholds.feature_id AND mrf.maxflow_3day_cfs >= thresholds.high_water_threshold -JOIN derived.hydrotable_staggered AS ht ON ht.feature_id = mrf.feature_id AND mrf.maxflow_3day_cfs >= ht.discharge_cfs AND mrf.maxflow_3day_cfs <= ht.next_discharge_cfs -JOIN derived.usgs_rating_curves_staggered AS urc ON urc.location_id::text = ht.location_id AND mrf.maxflow_3day_cfs >= urc.discharge_cfs AND mrf.maxflow_3day_cfs <= urc.next_discharge_cfs +FROM cache.max_flows_mrf_gfs_3day AS mrf +JOIN derived.recurrence_flows_conus thresholds ON mrf.feature_id = thresholds.feature_id AND mrf.discharge_cfs >= thresholds.high_water_threshold +JOIN derived.hydrotable_staggered AS ht ON ht.feature_id = mrf.feature_id AND mrf.discharge_cfs >= ht.discharge_cfs AND mrf.discharge_cfs <= ht.next_discharge_cfs +JOIN derived.usgs_rating_curves_staggered AS urc ON urc.location_id::text = ht.location_id AND mrf.discharge_cfs >= urc.discharge_cfs AND mrf.discharge_cfs <= urc.next_discharge_cfs JOIN external.usgs_gage AS gage ON LPAD(gage.usgs_gage_id::text, 8, '0') = LPAD(ht.location_id::text, 8, '0') -GROUP BY urc.location_id, ht.feature_id, maxflow_3day_cfs; \ No newline at end of file +GROUP BY urc.location_id, ht.feature_id, mrf.discharge_cfs; \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/summaries/mrf_gfs_10day_max_inundation/5day_src_skill.sql 
b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/summaries/mrf_gfs_10day_max_inundation/5day_src_skill.sql index f3e89c6f..e19ceda2 100644 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/summaries/mrf_gfs_10day_max_inundation/5day_src_skill.sql +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/summaries/mrf_gfs_10day_max_inundation/5day_src_skill.sql @@ -6,18 +6,18 @@ SELECT ht.feature_id, ht.feature_id::text as feature_id_str, to_char('1900-01-01 00:00:00'::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS reference_time, - maxflow_5day_cfs, - MIN(ht.elevation_ft) + ((maxflow_5day_cfs - MIN(ht.discharge_cfs)) * ((MAX(ht.next_elevation_ft) - MIN(ht.elevation_ft)) / (MAX(ht.next_discharge_cfs) - MIN(ht.discharge_cfs)))) as synth_interp_elevation_ft, - MIN(urc.elevation_ft) + ((maxflow_5day_cfs - MIN(urc.discharge_cfs)) * ((MAX(urc.next_elevation_ft) - MIN(urc.elevation_ft)) / (MAX(urc.next_discharge_cfs) - MIN(urc.discharge_cfs)))) as usgs_interp_elevation_ft, - MIN(ht.elevation_ft) + ((maxflow_5day_cfs - MIN(ht.discharge_cfs)) * ((MAX(ht.next_elevation_ft) - MIN(ht.elevation_ft)) / (MAX(ht.next_discharge_cfs) - MIN(ht.discharge_cfs)))) - - MIN(urc.elevation_ft) + ((maxflow_5day_cfs - MIN(urc.discharge_cfs)) * ((MAX(urc.next_elevation_ft) - MIN(urc.elevation_ft)) / (MAX(urc.next_discharge_cfs) - MIN(urc.discharge_cfs)))) as diff_ft, + mrf.discharge_cfs AS maxflow_5day_cfs, + MIN(ht.elevation_ft) + ((mrf.discharge_cfs - MIN(ht.discharge_cfs)) * ((MAX(ht.next_elevation_ft) - MIN(ht.elevation_ft)) / (MAX(ht.next_discharge_cfs) - MIN(ht.discharge_cfs)))) as synth_interp_elevation_ft, + MIN(urc.elevation_ft) + ((mrf.discharge_cfs - MIN(urc.discharge_cfs)) * ((MAX(urc.next_elevation_ft) - MIN(urc.elevation_ft)) / (MAX(urc.next_discharge_cfs) - MIN(urc.discharge_cfs)))) as usgs_interp_elevation_ft, + MIN(ht.elevation_ft) + ((mrf.discharge_cfs - MIN(ht.discharge_cfs)) * ((MAX(ht.next_elevation_ft) - MIN(ht.elevation_ft)) / 
(MAX(ht.next_discharge_cfs) - MIN(ht.discharge_cfs)))) - + MIN(urc.elevation_ft) + ((mrf.discharge_cfs - MIN(urc.discharge_cfs)) * ((MAX(urc.next_elevation_ft) - MIN(urc.elevation_ft)) / (MAX(urc.next_discharge_cfs) - MIN(urc.discharge_cfs)))) as diff_ft, MIN(navd88_datum) as navd88_datum, MIN(stage) as usgs_stage, ST_TRANSFORM(MIN(gage.geo_point), 3857) as geom INTO publish.mrf_gfs_max_inundation_5day_src_skill -FROM cache.max_flows_mrf_gfs AS mrf -JOIN derived.recurrence_flows_conus thresholds ON mrf.feature_id = thresholds.feature_id AND mrf.maxflow_5day_cfs >= thresholds.high_water_threshold -JOIN derived.hydrotable_staggered AS ht ON ht.feature_id = mrf.feature_id AND mrf.maxflow_5day_cfs >= ht.discharge_cfs AND mrf.maxflow_5day_cfs <= ht.next_discharge_cfs -JOIN derived.usgs_rating_curves_staggered AS urc ON urc.location_id::text = ht.location_id AND mrf.maxflow_5day_cfs >= urc.discharge_cfs AND mrf.maxflow_5day_cfs <= urc.next_discharge_cfs +FROM cache.max_flows_mrf_gfs_5day AS mrf +JOIN derived.recurrence_flows_conus thresholds ON mrf.feature_id = thresholds.feature_id AND mrf.discharge_cfs >= thresholds.high_water_threshold +JOIN derived.hydrotable_staggered AS ht ON ht.feature_id = mrf.feature_id AND mrf.discharge_cfs >= ht.discharge_cfs AND mrf.discharge_cfs <= ht.next_discharge_cfs +JOIN derived.usgs_rating_curves_staggered AS urc ON urc.location_id::text = ht.location_id AND mrf.discharge_cfs >= urc.discharge_cfs AND mrf.discharge_cfs <= urc.next_discharge_cfs JOIN external.usgs_gage AS gage ON LPAD(gage.usgs_gage_id::text, 8, '0') = LPAD(ht.location_id::text, 8, '0') -GROUP BY urc.location_id, ht.feature_id, maxflow_5day_cfs; \ No newline at end of file +GROUP BY urc.location_id, ht.feature_id, mrf.discharge_cfs; \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/summaries/mrf_nbm_10day_max_inundation/10day_src_skill.sql 
b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/summaries/mrf_nbm_10day_max_inundation/10day_src_skill.sql index 3cdb684b..b6b9174d 100644 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/summaries/mrf_nbm_10day_max_inundation/10day_src_skill.sql +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/summaries/mrf_nbm_10day_max_inundation/10day_src_skill.sql @@ -6,18 +6,18 @@ SELECT ht.feature_id, ht.feature_id::text as feature_id_str, to_char('1900-01-01 00:00:00'::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS reference_time, - maxflow_10day_cfs, - MIN(ht.elevation_ft) + ((maxflow_10day_cfs - MIN(ht.discharge_cfs)) * ((MAX(ht.next_elevation_ft) - MIN(ht.elevation_ft)) / (MAX(ht.next_discharge_cfs) - MIN(ht.discharge_cfs)))) as synth_interp_elevation_ft, - MIN(urc.elevation_ft) + ((maxflow_10day_cfs - MIN(urc.discharge_cfs)) * ((MAX(urc.next_elevation_ft) - MIN(urc.elevation_ft)) / (MAX(urc.next_discharge_cfs) - MIN(urc.discharge_cfs)))) as usgs_interp_elevation_ft, - MIN(ht.elevation_ft) + ((maxflow_10day_cfs - MIN(ht.discharge_cfs)) * ((MAX(ht.next_elevation_ft) - MIN(ht.elevation_ft)) / (MAX(ht.next_discharge_cfs) - MIN(ht.discharge_cfs)))) - - MIN(urc.elevation_ft) + ((maxflow_10day_cfs - MIN(urc.discharge_cfs)) * ((MAX(urc.next_elevation_ft) - MIN(urc.elevation_ft)) / (MAX(urc.next_discharge_cfs) - MIN(urc.discharge_cfs)))) as diff_ft, + mrf.discharge_cfs AS maxflow_10day_cfs, + MIN(ht.elevation_ft) + ((mrf.discharge_cfs - MIN(ht.discharge_cfs)) * ((MAX(ht.next_elevation_ft) - MIN(ht.elevation_ft)) / (MAX(ht.next_discharge_cfs) - MIN(ht.discharge_cfs)))) as synth_interp_elevation_ft, + MIN(urc.elevation_ft) + ((mrf.discharge_cfs - MIN(urc.discharge_cfs)) * ((MAX(urc.next_elevation_ft) - MIN(urc.elevation_ft)) / (MAX(urc.next_discharge_cfs) - MIN(urc.discharge_cfs)))) as usgs_interp_elevation_ft, + MIN(ht.elevation_ft) + ((mrf.discharge_cfs - MIN(ht.discharge_cfs)) * ((MAX(ht.next_elevation_ft) - MIN(ht.elevation_ft)) / 
(MAX(ht.next_discharge_cfs) - MIN(ht.discharge_cfs)))) - + MIN(urc.elevation_ft) + ((mrf.discharge_cfs - MIN(urc.discharge_cfs)) * ((MAX(urc.next_elevation_ft) - MIN(urc.elevation_ft)) / (MAX(urc.next_discharge_cfs) - MIN(urc.discharge_cfs)))) as diff_ft, MIN(navd88_datum) as navd88_datum, MIN(stage) as usgs_stage, ST_TRANSFORM(MIN(gage.geo_point), 3857) as geom INTO publish.mrf_nbm_max_inundation_10day_src_skill -FROM cache.max_flows_mrf_nbm AS mrf -JOIN derived.recurrence_flows_conus thresholds ON mrf.feature_id = thresholds.feature_id AND mrf.maxflow_10day_cfs >= thresholds.high_water_threshold -JOIN derived.hydrotable_staggered AS ht ON ht.feature_id = mrf.feature_id AND mrf.maxflow_10day_cfs >= ht.discharge_cfs AND mrf.maxflow_10day_cfs <= ht.next_discharge_cfs -JOIN derived.usgs_rating_curves_staggered AS urc ON urc.location_id::text = ht.location_id AND mrf.maxflow_10day_cfs >= urc.discharge_cfs AND mrf.maxflow_10day_cfs <= urc.next_discharge_cfs +FROM cache.max_flows_mrf_nbm_10day AS mrf +JOIN derived.recurrence_flows_conus thresholds ON mrf.feature_id = thresholds.feature_id AND mrf.discharge_cfs >= thresholds.high_water_threshold +JOIN derived.hydrotable_staggered AS ht ON ht.feature_id = mrf.feature_id AND mrf.discharge_cfs >= ht.discharge_cfs AND mrf.discharge_cfs <= ht.next_discharge_cfs +JOIN derived.usgs_rating_curves_staggered AS urc ON urc.location_id::text = ht.location_id AND mrf.discharge_cfs >= urc.discharge_cfs AND mrf.discharge_cfs <= urc.next_discharge_cfs JOIN external.usgs_gage AS gage ON LPAD(gage.usgs_gage_id::text, 8, '0') = LPAD(ht.location_id::text, 8, '0') -GROUP BY urc.location_id, ht.feature_id, maxflow_10day_cfs; \ No newline at end of file +GROUP BY urc.location_id, ht.feature_id, mrf.discharge_cfs; \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/summaries/mrf_nbm_10day_max_inundation/3day_src_skill.sql 
b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/summaries/mrf_nbm_10day_max_inundation/3day_src_skill.sql index 8342eaa1..6e2b4ef6 100644 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/summaries/mrf_nbm_10day_max_inundation/3day_src_skill.sql +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/summaries/mrf_nbm_10day_max_inundation/3day_src_skill.sql @@ -6,18 +6,18 @@ SELECT ht.feature_id, ht.feature_id::text as feature_id_str, to_char('1900-01-01 00:00:00'::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS reference_time, - maxflow_3day_cfs, - MIN(ht.elevation_ft) + ((maxflow_3day_cfs - MIN(ht.discharge_cfs)) * ((MAX(ht.next_elevation_ft) - MIN(ht.elevation_ft)) / (MAX(ht.next_discharge_cfs) - MIN(ht.discharge_cfs)))) as synth_interp_elevation_ft, - MIN(urc.elevation_ft) + ((maxflow_3day_cfs - MIN(urc.discharge_cfs)) * ((MAX(urc.next_elevation_ft) - MIN(urc.elevation_ft)) / (MAX(urc.next_discharge_cfs) - MIN(urc.discharge_cfs)))) as usgs_interp_elevation_ft, - MIN(ht.elevation_ft) + ((maxflow_3day_cfs - MIN(ht.discharge_cfs)) * ((MAX(ht.next_elevation_ft) - MIN(ht.elevation_ft)) / (MAX(ht.next_discharge_cfs) - MIN(ht.discharge_cfs)))) - - MIN(urc.elevation_ft) + ((maxflow_3day_cfs - MIN(urc.discharge_cfs)) * ((MAX(urc.next_elevation_ft) - MIN(urc.elevation_ft)) / (MAX(urc.next_discharge_cfs) - MIN(urc.discharge_cfs)))) as diff_ft, + mrf.discharge_cfs AS maxflow_3day_cfs, + MIN(ht.elevation_ft) + ((mrf.discharge_cfs - MIN(ht.discharge_cfs)) * ((MAX(ht.next_elevation_ft) - MIN(ht.elevation_ft)) / (MAX(ht.next_discharge_cfs) - MIN(ht.discharge_cfs)))) as synth_interp_elevation_ft, + MIN(urc.elevation_ft) + ((mrf.discharge_cfs - MIN(urc.discharge_cfs)) * ((MAX(urc.next_elevation_ft) - MIN(urc.elevation_ft)) / (MAX(urc.next_discharge_cfs) - MIN(urc.discharge_cfs)))) as usgs_interp_elevation_ft, + MIN(ht.elevation_ft) + ((mrf.discharge_cfs - MIN(ht.discharge_cfs)) * ((MAX(ht.next_elevation_ft) - MIN(ht.elevation_ft)) / 
(MAX(ht.next_discharge_cfs) - MIN(ht.discharge_cfs)))) - + MIN(urc.elevation_ft) + ((mrf.discharge_cfs - MIN(urc.discharge_cfs)) * ((MAX(urc.next_elevation_ft) - MIN(urc.elevation_ft)) / (MAX(urc.next_discharge_cfs) - MIN(urc.discharge_cfs)))) as diff_ft, MIN(navd88_datum) as navd88_datum, MIN(stage) as usgs_stage, ST_TRANSFORM(MIN(gage.geo_point), 3857) as geom INTO publish.mrf_nbm_max_inundation_3day_src_skill -FROM cache.max_flows_mrf_nbm AS mrf -JOIN derived.recurrence_flows_conus thresholds ON mrf.feature_id = thresholds.feature_id AND mrf.maxflow_3day_cfs >= thresholds.high_water_threshold -JOIN derived.hydrotable_staggered AS ht ON ht.feature_id = mrf.feature_id AND mrf.maxflow_3day_cfs >= ht.discharge_cfs AND mrf.maxflow_3day_cfs <= ht.next_discharge_cfs -JOIN derived.usgs_rating_curves_staggered AS urc ON urc.location_id::text = ht.location_id AND mrf.maxflow_3day_cfs >= urc.discharge_cfs AND mrf.maxflow_3day_cfs <= urc.next_discharge_cfs +FROM cache.max_flows_mrf_nbm_3day AS mrf +JOIN derived.recurrence_flows_conus thresholds ON mrf.feature_id = thresholds.feature_id AND mrf.discharge_cfs >= thresholds.high_water_threshold +JOIN derived.hydrotable_staggered AS ht ON ht.feature_id = mrf.feature_id AND mrf.discharge_cfs >= ht.discharge_cfs AND mrf.discharge_cfs <= ht.next_discharge_cfs +JOIN derived.usgs_rating_curves_staggered AS urc ON urc.location_id::text = ht.location_id AND mrf.discharge_cfs >= urc.discharge_cfs AND mrf.discharge_cfs <= urc.next_discharge_cfs JOIN external.usgs_gage AS gage ON LPAD(gage.usgs_gage_id::text, 8, '0') = LPAD(ht.location_id::text, 8, '0') -GROUP BY urc.location_id, ht.feature_id, maxflow_3day_cfs; \ No newline at end of file +GROUP BY urc.location_id, ht.feature_id, mrf.discharge_cfs; \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/summaries/mrf_nbm_10day_max_inundation/5day_src_skill.sql 
b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/summaries/mrf_nbm_10day_max_inundation/5day_src_skill.sql index 0772e0c8..df10c7c5 100644 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/summaries/mrf_nbm_10day_max_inundation/5day_src_skill.sql +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/summaries/mrf_nbm_10day_max_inundation/5day_src_skill.sql @@ -6,18 +6,18 @@ SELECT ht.feature_id, ht.feature_id::text as feature_id_str, to_char('1900-01-01 00:00:00'::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS reference_time, - maxflow_5day_cfs, - MIN(ht.elevation_ft) + ((maxflow_5day_cfs - MIN(ht.discharge_cfs)) * ((MAX(ht.next_elevation_ft) - MIN(ht.elevation_ft)) / (MAX(ht.next_discharge_cfs) - MIN(ht.discharge_cfs)))) as synth_interp_elevation_ft, - MIN(urc.elevation_ft) + ((maxflow_5day_cfs - MIN(urc.discharge_cfs)) * ((MAX(urc.next_elevation_ft) - MIN(urc.elevation_ft)) / (MAX(urc.next_discharge_cfs) - MIN(urc.discharge_cfs)))) as usgs_interp_elevation_ft, - MIN(ht.elevation_ft) + ((maxflow_5day_cfs - MIN(ht.discharge_cfs)) * ((MAX(ht.next_elevation_ft) - MIN(ht.elevation_ft)) / (MAX(ht.next_discharge_cfs) - MIN(ht.discharge_cfs)))) - - MIN(urc.elevation_ft) + ((maxflow_5day_cfs - MIN(urc.discharge_cfs)) * ((MAX(urc.next_elevation_ft) - MIN(urc.elevation_ft)) / (MAX(urc.next_discharge_cfs) - MIN(urc.discharge_cfs)))) as diff_ft, + mrf.discharge_cfs AS maxflow_5day_cfs, + MIN(ht.elevation_ft) + ((mrf.discharge_cfs - MIN(ht.discharge_cfs)) * ((MAX(ht.next_elevation_ft) - MIN(ht.elevation_ft)) / (MAX(ht.next_discharge_cfs) - MIN(ht.discharge_cfs)))) as synth_interp_elevation_ft, + MIN(urc.elevation_ft) + ((mrf.discharge_cfs - MIN(urc.discharge_cfs)) * ((MAX(urc.next_elevation_ft) - MIN(urc.elevation_ft)) / (MAX(urc.next_discharge_cfs) - MIN(urc.discharge_cfs)))) as usgs_interp_elevation_ft, + MIN(ht.elevation_ft) + ((mrf.discharge_cfs - MIN(ht.discharge_cfs)) * ((MAX(ht.next_elevation_ft) - MIN(ht.elevation_ft)) / 
(MAX(ht.next_discharge_cfs) - MIN(ht.discharge_cfs)))) - + MIN(urc.elevation_ft) + ((mrf.discharge_cfs - MIN(urc.discharge_cfs)) * ((MAX(urc.next_elevation_ft) - MIN(urc.elevation_ft)) / (MAX(urc.next_discharge_cfs) - MIN(urc.discharge_cfs)))) as diff_ft, MIN(navd88_datum) as navd88_datum, MIN(stage) as usgs_stage, ST_TRANSFORM(MIN(gage.geo_point), 3857) as geom INTO publish.mrf_nbm_max_inundation_5day_src_skill -FROM cache.max_flows_mrf_nbm AS mrf -JOIN derived.recurrence_flows_conus thresholds ON mrf.feature_id = thresholds.feature_id AND mrf.maxflow_5day_cfs >= thresholds.high_water_threshold -JOIN derived.hydrotable_staggered AS ht ON ht.feature_id = mrf.feature_id AND mrf.maxflow_5day_cfs >= ht.discharge_cfs AND mrf.maxflow_5day_cfs <= ht.next_discharge_cfs -JOIN derived.usgs_rating_curves_staggered AS urc ON urc.location_id::text = ht.location_id AND mrf.maxflow_5day_cfs >= urc.discharge_cfs AND mrf.maxflow_5day_cfs <= urc.next_discharge_cfs +FROM cache.max_flows_mrf_nbm_5day AS mrf +JOIN derived.recurrence_flows_conus thresholds ON mrf.feature_id = thresholds.feature_id AND mrf.discharge_cfs >= thresholds.high_water_threshold +JOIN derived.hydrotable_staggered AS ht ON ht.feature_id = mrf.feature_id AND mrf.discharge_cfs >= ht.discharge_cfs AND mrf.discharge_cfs <= ht.next_discharge_cfs +JOIN derived.usgs_rating_curves_staggered AS urc ON urc.location_id::text = ht.location_id AND mrf.discharge_cfs >= urc.discharge_cfs AND mrf.discharge_cfs <= urc.next_discharge_cfs JOIN external.usgs_gage AS gage ON LPAD(gage.usgs_gage_id::text, 8, '0') = LPAD(ht.location_id::text, 8, '0') -GROUP BY urc.location_id, ht.feature_id, maxflow_5day_cfs; \ No newline at end of file +GROUP BY urc.location_id, ht.feature_id, mrf.discharge_cfs; \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/summaries/srf_18hr_max_inundation/src_skill.sql b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/summaries/srf_18hr_max_inundation/src_skill.sql index 
d22ddacb..aca37f56 100644 --- a/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/summaries/srf_18hr_max_inundation/src_skill.sql +++ b/Core/LAMBDA/viz_functions/viz_db_postprocess_sql/summaries/srf_18hr_max_inundation/src_skill.sql @@ -6,18 +6,18 @@ SELECT ht.feature_id, ht.feature_id::text as feature_id_str, to_char('1900-01-01 00:00:00'::timestamp without time zone, 'YYYY-MM-DD HH24:MI:SS UTC') AS reference_time, - maxflow_18hour_cfs, - MIN(ht.elevation_ft) + ((maxflow_18hour_cfs - MIN(ht.discharge_cfs)) * ((MAX(ht.next_elevation_ft) - MIN(ht.elevation_ft)) / (MAX(ht.next_discharge_cfs) - MIN(ht.discharge_cfs)))) as synth_interp_elevation_ft, - MIN(urc.elevation_ft) + ((maxflow_18hour_cfs - MIN(urc.discharge_cfs)) * ((MAX(urc.next_elevation_ft) - MIN(urc.elevation_ft)) / (MAX(urc.next_discharge_cfs) - MIN(urc.discharge_cfs)))) as usgs_interp_elevation_ft, - MIN(ht.elevation_ft) + ((maxflow_18hour_cfs - MIN(ht.discharge_cfs)) * ((MAX(ht.next_elevation_ft) - MIN(ht.elevation_ft)) / (MAX(ht.next_discharge_cfs) - MIN(ht.discharge_cfs)))) - - MIN(urc.elevation_ft) + ((maxflow_18hour_cfs - MIN(urc.discharge_cfs)) * ((MAX(urc.next_elevation_ft) - MIN(urc.elevation_ft)) / (MAX(urc.next_discharge_cfs) - MIN(urc.discharge_cfs)))) as diff_ft, + srf.discharge_cfs AS maxflow_18hour_cfs, + MIN(ht.elevation_ft) + ((srf.discharge_cfs - MIN(ht.discharge_cfs)) * ((MAX(ht.next_elevation_ft) - MIN(ht.elevation_ft)) / (MAX(ht.next_discharge_cfs) - MIN(ht.discharge_cfs)))) as synth_interp_elevation_ft, + MIN(urc.elevation_ft) + ((srf.discharge_cfs - MIN(urc.discharge_cfs)) * ((MAX(urc.next_elevation_ft) - MIN(urc.elevation_ft)) / (MAX(urc.next_discharge_cfs) - MIN(urc.discharge_cfs)))) as usgs_interp_elevation_ft, + MIN(ht.elevation_ft) + ((srf.discharge_cfs - MIN(ht.discharge_cfs)) * ((MAX(ht.next_elevation_ft) - MIN(ht.elevation_ft)) / (MAX(ht.next_discharge_cfs) - MIN(ht.discharge_cfs)))) - + MIN(urc.elevation_ft) + ((srf.discharge_cfs - MIN(urc.discharge_cfs)) * 
((MAX(urc.next_elevation_ft) - MIN(urc.elevation_ft)) / (MAX(urc.next_discharge_cfs) - MIN(urc.discharge_cfs)))) as diff_ft, MIN(navd88_datum) as navd88_datum, MIN(stage) as usgs_stage, ST_TRANSFORM(MIN(gage.geo_point), 3857) as geom INTO publish.srf_18hr_max_inundation_src_skill FROM cache.max_flows_srf AS srf -JOIN derived.recurrence_flows_conus thresholds ON srf.feature_id = thresholds.feature_id AND srf.maxflow_18hour_cfs >= thresholds.high_water_threshold -JOIN derived.hydrotable_staggered AS ht ON ht.feature_id = srf.feature_id AND srf.maxflow_18hour_cfs >= ht.discharge_cfs AND srf.maxflow_18hour_cfs <= ht.next_discharge_cfs -JOIN derived.usgs_rating_curves_staggered AS urc ON urc.location_id::text = ht.location_id AND srf.maxflow_18hour_cfs >= urc.discharge_cfs AND srf.maxflow_18hour_cfs <= urc.next_discharge_cfs +JOIN derived.recurrence_flows_conus thresholds ON srf.feature_id = thresholds.feature_id AND srf.discharge_cfs >= thresholds.high_water_threshold +JOIN derived.hydrotable_staggered AS ht ON ht.feature_id = srf.feature_id AND srf.discharge_cfs >= ht.discharge_cfs AND srf.discharge_cfs <= ht.next_discharge_cfs +JOIN derived.usgs_rating_curves_staggered AS urc ON urc.location_id::text = ht.location_id AND srf.discharge_cfs >= urc.discharge_cfs AND srf.discharge_cfs <= urc.next_discharge_cfs JOIN external.usgs_gage AS gage ON LPAD(gage.usgs_gage_id::text, 8, '0') = LPAD(ht.location_id::text, 8, '0') -GROUP BY urc.location_id, ht.feature_id, maxflow_18hour_cfs; \ No newline at end of file +GROUP BY urc.location_id, ht.feature_id, srf.discharge_cfs; \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/ana_inundation.sql b/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/ana_inundation.sql deleted file mode 100644 index 6eb6579b..00000000 --- a/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/ana_inundation.sql +++ /dev/null @@ -1,8 +0,0 @@ -SELECT - max_forecast.feature_id, - max_forecast.maxflow_1hour_cms 
AS streamflow_cms -FROM cache.max_flows_ana max_forecast -JOIN derived.recurrence_flows_conus rf ON rf.feature_id=max_forecast.feature_id -WHERE - max_forecast.maxflow_1hour_cfs >= rf.high_water_threshold AND - rf.high_water_threshold > 0::double precision \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/ana_inundation_hi.sql b/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/ana_inundation_hi.sql deleted file mode 100644 index 2a91eef7..00000000 --- a/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/ana_inundation_hi.sql +++ /dev/null @@ -1,8 +0,0 @@ -SELECT - max_forecast.feature_id, - max_forecast.maxflow_1hour_cms AS streamflow_cms -FROM cache.max_flows_ana_hi max_forecast -JOIN derived.recurrence_flows_hi rf ON rf.feature_id=max_forecast.feature_id -WHERE - max_forecast.maxflow_1hour_cfs >= rf.high_water_threshold AND - rf.high_water_threshold > 0::double precision diff --git a/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/ana_inundation_prvi.sql b/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/ana_inundation_prvi.sql deleted file mode 100644 index 073430b1..00000000 --- a/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/ana_inundation_prvi.sql +++ /dev/null @@ -1,8 +0,0 @@ -SELECT - max_forecast.feature_id, - max_forecast.maxflow_1hour_cms AS streamflow_cms -FROM cache.max_flows_ana_prvi max_forecast -JOIN derived.recurrence_flows_prvi rf ON rf.feature_id=max_forecast.feature_id -WHERE - max_forecast.maxflow_1hour_cfs >= rf.high_water_threshold AND - rf.high_water_threshold > 0::double precision diff --git a/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/ana_past_14day_max_inundation.sql b/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/ana_past_14day_max_inundation.sql deleted file mode 100644 index 4aef0b5d..00000000 --- a/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/ana_past_14day_max_inundation.sql +++ /dev/null @@ -1,8 +0,0 @@ -SELECT - max_forecast.feature_id, - 
max_forecast.max_flow_14day_cms AS streamflow_cms -FROM cache.max_flows_ana_14day max_forecast -JOIN derived.recurrence_flows_conus rf ON rf.feature_id=max_forecast.feature_id -WHERE - max_forecast.max_flow_14day_cfs >= rf.high_water_threshold AND - rf.high_water_threshold > 0::double precision diff --git a/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/ana_past_7day_max_inundation.sql b/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/ana_past_7day_max_inundation.sql deleted file mode 100644 index f572a818..00000000 --- a/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/ana_past_7day_max_inundation.sql +++ /dev/null @@ -1,8 +0,0 @@ -SELECT - max_forecast.feature_id, - max_forecast.max_flow_7day_cms AS streamflow_cms -FROM cache.max_flows_ana_7day max_forecast -JOIN derived.recurrence_flows_conus rf ON rf.feature_id=max_forecast.feature_id -WHERE - max_forecast.max_flow_7day_cfs >= rf.high_water_threshold AND - rf.high_water_threshold > 0::double precision diff --git a/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/mrf_gfs_max_inundation_10day.sql b/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/mrf_gfs_max_inundation_10day.sql deleted file mode 100644 index 6cf60fa2..00000000 --- a/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/mrf_gfs_max_inundation_10day.sql +++ /dev/null @@ -1,8 +0,0 @@ -SELECT - max_forecast.feature_id, - max_forecast.maxflow_10day_cms AS streamflow_cms -FROM cache.max_flows_mrf_gfs max_forecast -JOIN derived.recurrence_flows_conus rf ON rf.feature_id=max_forecast.feature_id -WHERE - max_forecast.maxflow_10day_cfs >= rf.high_water_threshold AND - rf.high_water_threshold > 0::double precision diff --git a/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/mrf_gfs_max_inundation_3day.sql b/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/mrf_gfs_max_inundation_3day.sql deleted file mode 100644 index 3893f240..00000000 --- a/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/mrf_gfs_max_inundation_3day.sql 
+++ /dev/null @@ -1,8 +0,0 @@ -SELECT - max_forecast.feature_id, - max_forecast.maxflow_3day_cms AS streamflow_cms -FROM cache.max_flows_mrf_gfs max_forecast -JOIN derived.recurrence_flows_conus rf ON rf.feature_id=max_forecast.feature_id -WHERE - max_forecast.maxflow_3day_cfs >= rf.high_water_threshold AND - rf.high_water_threshold > 0::double precision diff --git a/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/mrf_gfs_max_inundation_5day.sql b/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/mrf_gfs_max_inundation_5day.sql deleted file mode 100644 index 16295156..00000000 --- a/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/mrf_gfs_max_inundation_5day.sql +++ /dev/null @@ -1,8 +0,0 @@ -SELECT - max_forecast.feature_id, - max_forecast.maxflow_5day_cms AS streamflow_cms -FROM cache.max_flows_mrf_gfs max_forecast -JOIN derived.recurrence_flows_conus rf ON rf.feature_id=max_forecast.feature_id -WHERE - max_forecast.maxflow_5day_cfs >= rf.high_water_threshold AND - rf.high_water_threshold > 0::double precision diff --git a/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/mrf_nbm_max_inundation_10day.sql b/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/mrf_nbm_max_inundation_10day.sql deleted file mode 100644 index 85e2ca6c..00000000 --- a/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/mrf_nbm_max_inundation_10day.sql +++ /dev/null @@ -1,8 +0,0 @@ -SELECT - max_forecast.feature_id, - max_forecast.maxflow_10day_cms AS streamflow_cms -FROM cache.max_flows_mrf_nbm max_forecast -JOIN derived.recurrence_flows_conus rf ON rf.feature_id=max_forecast.feature_id -WHERE - max_forecast.maxflow_10day_cfs >= rf.high_water_threshold AND - rf.high_water_threshold > 0::double precision diff --git a/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/mrf_nbm_max_inundation_3day.sql b/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/mrf_nbm_max_inundation_3day.sql deleted file mode 100644 index a69d5d91..00000000 --- 
a/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/mrf_nbm_max_inundation_3day.sql +++ /dev/null @@ -1,8 +0,0 @@ -SELECT - max_forecast.feature_id, - max_forecast.maxflow_3day_cms AS streamflow_cms -FROM cache.max_flows_mrf_nbm max_forecast -JOIN derived.recurrence_flows_conus rf ON rf.feature_id=max_forecast.feature_id -WHERE - max_forecast.maxflow_3day_cfs >= rf.high_water_threshold AND - rf.high_water_threshold > 0::double precision diff --git a/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/mrf_nbm_max_inundation_5day.sql b/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/mrf_nbm_max_inundation_5day.sql deleted file mode 100644 index f7d42d74..00000000 --- a/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/mrf_nbm_max_inundation_5day.sql +++ /dev/null @@ -1,8 +0,0 @@ -SELECT - max_forecast.feature_id, - max_forecast.maxflow_5day_cms AS streamflow_cms -FROM cache.max_flows_mrf_nbm max_forecast -JOIN derived.recurrence_flows_conus rf ON rf.feature_id=max_forecast.feature_id -WHERE - max_forecast.maxflow_5day_cfs >= rf.high_water_threshold AND - rf.high_water_threshold > 0::double precision diff --git a/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/srf_18hr_max_inundation.sql b/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/srf_18hr_max_inundation.sql deleted file mode 100644 index 15b25620..00000000 --- a/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/srf_18hr_max_inundation.sql +++ /dev/null @@ -1,8 +0,0 @@ -SELECT - max_forecast.feature_id, - max_forecast.maxflow_18hour_cms AS streamflow_cms -FROM cache.max_flows_srf max_forecast -JOIN derived.recurrence_flows_conus rf ON rf.feature_id=max_forecast.feature_id -WHERE - max_forecast.maxflow_18hour_cfs >= rf.high_water_threshold AND - rf.high_water_threshold > 0::double precision diff --git a/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/srf_48hr_max_inundation_hi.sql b/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/srf_48hr_max_inundation_hi.sql deleted file mode 
100644 index 48e49f3b..00000000 --- a/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/srf_48hr_max_inundation_hi.sql +++ /dev/null @@ -1,8 +0,0 @@ -SELECT - max_forecast.feature_id, - max_forecast.maxflow_48hour_cms AS streamflow_cms -FROM cache.max_flows_srf_hi max_forecast -JOIN derived.recurrence_flows_hi rf ON rf.feature_id=max_forecast.feature_id -WHERE - max_forecast.maxflow_48hour_cfs >= rf.high_water_threshold AND - rf.high_water_threshold > 0::double precision diff --git a/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/srf_48hr_max_inundation_prvi.sql b/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/srf_48hr_max_inundation_prvi.sql deleted file mode 100644 index 138fd94d..00000000 --- a/Core/LAMBDA/viz_functions/viz_fim_data_prep/data_sql/srf_48hr_max_inundation_prvi.sql +++ /dev/null @@ -1,8 +0,0 @@ -SELECT - max_forecast.feature_id, - max_forecast.maxflow_48hour_cms AS streamflow_cms -FROM cache.max_flows_srf_prvi max_forecast -JOIN derived.recurrence_flows_prvi rf ON rf.feature_id=max_forecast.feature_id -WHERE - max_forecast.maxflow_48hour_cfs >= rf.high_water_threshold AND - rf.high_water_threshold > 0::double precision diff --git a/Core/LAMBDA/viz_functions/viz_fim_data_prep/lambda_function.py b/Core/LAMBDA/viz_functions/viz_fim_data_prep/lambda_function.py index cd0f23a9..5d3e9e23 100644 --- a/Core/LAMBDA/viz_functions/viz_fim_data_prep/lambda_function.py +++ b/Core/LAMBDA/viz_functions/viz_fim_data_prep/lambda_function.py @@ -10,231 +10,94 @@ PROCESSED_OUTPUT_BUCKET = os.environ['PROCESSED_OUTPUT_BUCKET'] PROCESSED_OUTPUT_PREFIX = os.environ['PROCESSED_OUTPUT_PREFIX'] +hand_processing_parallel_groups = 20 S3 = boto3.client('s3') -def lambda_handler(event, context): - """ - The lambda handler is the function that is kicked off with the lambda. This function will take a forecast file, - extract features with streamflows above 1.5 year threshold, and then kick off lambdas for each HUC with valid - data. 
- - Args: - event(event object): An event is a JSON-formatted document that contains data for a Lambda function to - process - context(object): Provides methods and properties that provide information about the invocation, function, - and runtime environment - """ - +################################################################################################################################################################# +def lambda_handler(event, context): + #This runs within the fim_config map of the step function, once cached fim (hand and ras2fim) has already been loaded, in order to allocate hucs/reaches to the hand processing step function. if event['step'] == "setup_fim_config": return setup_huc_inundation(event) + #This runs within the mapped hand processing step functions to get the hucs for the given iteration. else: return get_branch_iteration(event) +################################################################################################################################################################# +# This function runs at the top of a hand processing workflow, after cached fim has been loaded (from ras2fim and/or hand, via SQL in the postprocess_sql lambda function), and performs two main operations: +# 1. Query the vizprocessing database for flows to use for FIM (forecast flows for regular runs, recurrence flows for AEP FIM, rfc_categorical_flows for CATFIM) +# - If a specific sql file is present in the flows_sql folder, the function will use that to query flows from the RDS db. If a file is not present, it will use a template file in the templates_sql folder. +# 2. Setup appropriate groups of HUC8s to delegate the FIM extent generation for those flows to the hand_processing step function. +# - This function is also called within each huc processing group to get the branch iteration, as noted in lambda_handler above.
def setup_huc_inundation(event): + + # Get relevant variables from the Step Function json fim_config = event['args']['fim_config'] fim_config_name = fim_config['name'] - fim_config_sql = fim_config['sql_file'] + # fim_publish_db_type = fim_config['publish_db'] # TODO: Add this to step function only to pass to hand_processing? target_table = fim_config['target_table'] product = event['args']['product']['product'] configuration = event['args']['product']['configuration'] reference_time = event['args']['reference_time'] reference_date = datetime.datetime.strptime(reference_time, "%Y-%m-%d %H:%M:%S") + date = reference_date.strftime("%Y%m%d") + hour = reference_date.strftime("%H") sql_replace = event['args']['sql_rename_dict'] one_off = event['args'].get("hucs") process_by = fim_config.get('process_by', ['huc']) - if fim_config.get("states_to_run"): - states_to_run = fim_config.get("states_to_run") - else: - states_to_run = [] - - reference_service = True if configuration == "reference" else False - - if sql_replace.get(target_table): - target_table = sql_replace.get(target_table) - - print(f"Running FIM for {configuration} for {reference_time}") - viz_db = database(db_type="viz") - egis_db = database(db_type="egis") - if reference_service: - process_db = egis_db - else: - process_db = viz_db - - # Find the sql file, and replace any items in the dictionary - sql_path = f'data_sql/{fim_config_sql}.sql' - - # Checks if all tables references in sql file exist and are updated (if applicable) - # Raises a custom RequiredTableNotUpdated if not, which will be caught by viz_pipline - # and invoke a retry - viz_db.check_required_tables_updated(sql_path, sql_replace, reference_time, raise_if_false=True) - - sql = open(sql_path, 'r').read() - # replace portions of SQL with any items in the dictionary (at least has reference_time) - # sort the replace dictionary to have longer values upfront first - sql_replace_sorted = sorted(sql_replace.items(), key = lambda item : len(item[1]), 
reverse = True) - for word, replacement in sql_replace_sorted: - sql = re.sub(re.escape(word), replacement, sql, flags=re.IGNORECASE).replace('utc', 'UTC') - - setup_db_table(target_table, reference_time, viz_db, process_db, sql_replace) - - # If only running select states, add additional where clauses to the SQL - if len(states_to_run) > 0: - additional_where_clauses = " AND (channels.state = '" - for i, state in enumerate(states_to_run): - additional_where_clauses += state - if i+1 < len(states_to_run): - additional_where_clauses += "' OR channels.state = '" - else: - additional_where_clauses += "')" - sql += additional_where_clauses - - if "rfc" in fim_config_name: - alias = 'max_forecast' if 'max_forecast' in sql else 'rnr' - if sql.strip().endswith(';'): - sql = sql.replace(';', f' group by {alias}.feature_id, streamflow_cms') - else: - sql += f" group by {alias}.feature_id, streamflow_cms" - - sql = sql.replace(";", "") - fim_type = fim_config['fim_type'] - if fim_type == "coastal": - print("Running coastal SCHISM workflow") - hucs = viz_db.run_sql_in_db(sql) - hucs = list(hucs['huc'].values) - - return_object = { - 'hucs_to_process': hucs, - 'data_bucket': PROCESSED_OUTPUT_BUCKET, - 'data_prefix': PROCESSED_OUTPUT_PREFIX - } - else: - print("Running inland HAND workflow") - - # Parses the forecast key to get the necessary metadata for the output file - date = reference_date.strftime("%Y%m%d") - hour = reference_date.strftime("%H") - - ras_publish_table = get_valid_ras2fim_models(sql, target_table, reference_time, viz_db, egis_db, reference_service) - df_streamflows = get_features_for_HAND_processing(sql, ras_publish_table, viz_db) - processing_groups = df_streamflows.groupby(process_by) - - print(f"Kicking off {len(processing_groups)} processing groups for {product} for {reference_time}") + print(f"Running FIM for {configuration} for {reference_time}") + # Initilize the database class for relevant databases + viz_db = database(db_type="viz") # we always need 
the vizprocessing database to get flows data. - for group_vals, group_df in processing_groups: - if one_off and group_vals not in one_off: - continue - if group_df.empty: - continue - if isinstance(group_vals, str): - group_vals = [group_vals] - - csv_key = write_data_csv_file(product, fim_config_name, date, hour, group_vals, group_df) - - s3_keys = [] - df_streamflows = df_streamflows.drop_duplicates(process_by + ["huc8_branch"]) - df_streamflows_split = [df_split for df_split in np.array_split(df_streamflows[process_by + ["huc8_branch"]], 20) if not df_split.empty] - - for index, df in enumerate(df_streamflows_split): - # Key for the csv file that will be stored in S3 - csv_key = f"{PROCESSED_OUTPUT_PREFIX}/{product}/{fim_config_name}/workspace/{date}/{hour}/hucs_to_process_{index}.csv" - s3_keys.append(csv_key) - - # Save the dataframe as a local netcdf file - tmp_csv = f'/tmp/{product}.csv' - df.to_csv(tmp_csv, index=False) + print("Determing features to be processed by HAND") + # Query flows data from the vizprocessing database, using the SQL defined above. + # TODO: Update this for RFC, CatFIM, and AEP, and Catchments services by adding the creation of flows tables to postprocess_sql + hand_sql = open("templates_sql/hand_features.sql", 'r').read() + hand_sql = hand_sql.replace("{db_fim_table}", target_table) + df_streamflows = viz_db.run_sql_in_db(hand_sql) + + # Split reaches with flows into processing groups, and write two sets of csv files to S3 (we need to write to csvs to not exceed the limit of what can be passed in the step function): + # This first loop splits up the number of huc8_branch combinations into X even 'hucs_to_process' groups, in order to parallel process groups in a step function map, and writes those to csv files on S3. 
+ s3_keys = [] + df_huc8_branches = df_streamflows.drop_duplicates(process_by + ["huc8_branch"]) + df_huc8_branches_split = [df_split for df_split in np.array_split(df_huc8_branches[process_by + ["huc8_branch"]], hand_processing_parallel_groups) if not df_split.empty] + for index, df in enumerate(df_huc8_branches_split): + # Key for the csv file that will be stored in S3 + csv_key = f"{PROCESSED_OUTPUT_PREFIX}/{product}/{fim_config_name}/workspace/{date}/{hour}/hucs_to_process_{index}.csv" + s3_keys.append(csv_key) + + # Save the dataframe as a local netcdf file + tmp_csv = f'/tmp/{product}.csv' + df.to_csv(tmp_csv, index=False) + + # Upload the csv file into S3 + print(f"Uploading {csv_key}") + S3.upload_file(tmp_csv, PROCESSED_OUTPUT_BUCKET, csv_key) + os.remove(tmp_csv) - # Upload the csv file into S3 - print(f"Uploading {csv_key}") - S3.upload_file(tmp_csv, PROCESSED_OUTPUT_BUCKET, csv_key) - os.remove(tmp_csv) + # This second loop writes the actual reaches/flows data to a csv file for each huc8_branch, using the write_flows_data_csv_file function defined below. 
+ processing_groups = df_streamflows.groupby(process_by) + print(f"{len(df_streamflows)} Total Features for {product} HAND Processing for Reference Time:{reference_time} - Setting up {len(processing_groups)} processing groups.") + for group_vals, group_df in processing_groups: + if one_off and group_vals not in one_off: + continue + if group_df.empty: + continue + if isinstance(group_vals, str): + group_vals = [group_vals] + csv_key = write_flows_data_csv_file(product, fim_config_name, date, hour, group_vals, group_df) - return_object = { - 'hucs_to_process': s3_keys, - 'data_bucket': PROCESSED_OUTPUT_BUCKET, - 'data_prefix': PROCESSED_OUTPUT_PREFIX - } - - return return_object - - -def get_branch_iteration(event): - local_data_file = os.path.join("/tmp", os.path.basename(event['args']['huc_branches_to_process'])) - S3.download_file(event['args']['data_bucket'], event['args']['huc_branches_to_process'], local_data_file) - df = pd.read_csv(local_data_file) - df['huc'] = df['huc'].astype(str).str.zfill(6) - os.remove(local_data_file) - return_object = { - "huc_branches_to_process": df.to_dict("records") + 'hucs_to_process': s3_keys, + 'data_bucket': PROCESSED_OUTPUT_BUCKET, + 'data_prefix': PROCESSED_OUTPUT_PREFIX } - - return return_object - -def setup_db_table(db_fim_table, reference_time, viz_db, process_db, sql_replace=None): - """ - Sets up the necessary tables in a postgis data for later ingest from the huc processing functions - - Args: - configuration(str): product configuration for the product being ran (i.e. 
srf, srf_hi, etc) - reference_time(str): Reference time of the data being ran - sql_replace(dict): An optional dictionary by which to use to create a new table if needed - """ - index_name = f"idx_{db_fim_table.split('.')[-1:].pop()}_hydro_id" - db_schema = db_fim_table.split('.')[0] - - print(f"Setting up {db_fim_table}") - - with viz_db.get_db_connection() as connection: - cur = connection.cursor() - - # Add a row to the ingest status table indicating that an import has started. - SQL = f"INSERT INTO admin.ingest_status (target, reference_time, status, update_time) " \ - f"VALUES ('{db_fim_table}', '{reference_time}', 'Import Started', " \ - f"'{datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')}')" - cur.execute(SQL) - - with process_db.get_db_connection() as connection: - cur = connection.cursor() - - # See if the target table exists #TODO: Ensure table exists would make a good helper function - cur.execute(f"SELECT EXISTS (SELECT FROM pg_tables WHERE schemaname = '{db_fim_table.split('.')[0]}' AND tablename = '{db_fim_table.split('.')[1]}');") - table_exists = cur.fetchone()[0] - - # If the target table doesn't exist, create one basd on the sql_replace dict. - if not table_exists: - print(f"--> {db_fim_table} does not exist. Creating now.") - original_table = list(sql_replace.keys())[list(sql_replace.values()).index(db_fim_table)] #ToDo: error handling if not in list - cur.execute(f"DROP TABLE IF EXISTS {db_fim_table}; CREATE TABLE {db_fim_table} (LIKE {original_table})") - connection.commit() - - # Drop the existing index on the target table - print("Dropping target table index (if exists).") - SQL = f"DROP INDEX IF EXISTS {db_schema}.{index_name};" - cur.execute(SQL) - - # Truncate all records. 
- print("Truncating target table.") - SQL = f"TRUNCATE TABLE {db_fim_table};" - cur.execute(SQL) - connection.commit() - - return db_fim_table - -def write_data_csv_file(product, fim_config_name, date, hour, identifiers, huc_data): - ''' - Write the subsetted streamflow data to a csv so that the huc processing lambdas can grab it - - Args: - huc(str): HUC that will be processed - filename(str): Forecast file that was used - huc_data(pandas.datafrm): Dataframe subsetted for the specific huc + return return_object - Returns: - data_json_key(str): key (path) to the json file in the workspace folder - ''' +################################################################################################################################################################# +def write_flows_data_csv_file(product, fim_config_name, date, hour, identifiers, huc_data): s3_path_piece = '/'.join(identifiers) # Key for the csv file that will be stored in S3 csv_key = f"{PROCESSED_OUTPUT_PREFIX}/{product}/{fim_config_name}/workspace/{date}/{hour}/data/{s3_path_piece}_data.csv" @@ -245,98 +108,23 @@ def write_data_csv_file(product, fim_config_name, date, hour, identifiers, huc_d huc_data.to_csv(tmp_csv, index=False) # Upload the csv file into S3 - print(f"Uploading {csv_key}") + print(f"Uploading {csv_key} - {len(huc_data)} features.") S3.upload_file(tmp_csv, PROCESSED_OUTPUT_BUCKET, csv_key) os.remove(tmp_csv) return csv_key - -def get_valid_ras2fim_models(streamflow_sql, db_fim_table, reference_time, viz_db, egis_db, reference_service): - - if "flow_based_catfim" in db_fim_table: - ras_insertion_template = f'templates_sql/ras2fim_insertion_for_flow_based_catfim.sql' - elif "stage_based_catfim" in db_fim_table: - ras_insertion_template = f'templates_sql/ras2fim_insertion_for_stage_based_catfim.sql' - else: - ras_insertion_template = f'templates_sql/ras2fim_insertion.sql' - - ras_insertion_sql = open(ras_insertion_template, 'r').read() - ras_insertion_sql = ras_insertion_sql \ - 
.replace("{streamflow_sql}", streamflow_sql) \ - .replace("{db_fim_table}", db_fim_table) \ - .replace("{reference_time}", reference_time) - - publish_table = db_fim_table - if reference_service: - table = db_fim_table.split('.')[-1] - publish_table = f"publish.{table}" - ras_insertion_sql = ras_insertion_sql.replace(db_fim_table, publish_table) - - print(f"Adding ras2fim models to {db_fim_table}") - - with viz_db.get_db_connection() as connection: - cur = connection.cursor() - cur.execute(ras_insertion_sql) - if reference_service: - with viz_db.get_db_connection() as db_connection, db_connection.cursor() as cur: - cur.execute(f"SELECT * FROM publish.{table} LIMIT 1") - column_names = [desc[0] for desc in cur.description] - columns = ', '.join(column_names) - - print(f"Copying {publish_table} to {db_fim_table}") - try: # Try copying the data - copy_data_to_egis(egis_db, origin_table=f"vizprc_publish.{table}", dest_table=db_fim_table, columns=columns, add_oid=True) #Copy the publish table from the vizprc db to the egis db, using fdw - except Exception as e: # If it doesn't work initially, try refreshing the foreign schema and try again. - refresh_fdw_schema(egis_db, local_schema="vizprc_publish", remote_server="vizprc_db", remote_schema="publish") #Update the foreign data schema - we really don't need to run this all the time, but it's fast, so I'm trying it. 
- copy_data_to_egis(egis_db, origin_table=f"vizprc_publish.{table}", dest_table=db_fim_table, columns=columns, add_oid=True) #Copy the publish table from the vizprc db to the egis db, using fdw - - return publish_table - -def get_features_for_HAND_processing(streamflow_sql, db_fim_table, viz_db): - - if "flow_based_catfim" in db_fim_table: - hand_features_template = f'templates_sql/hand_features_for_flow_based_catfim.sql' - elif "stage_based_catfim" in db_fim_table: - hand_features_template = f'templates_sql/hand_features_for_stage_based_catfim.sql' - else: - hand_features_template = f'templates_sql/hand_features.sql' - - hand_sql = open(hand_features_template, 'r').read() - hand_sql = hand_sql \ - .replace("{streamflow_sql}", streamflow_sql) \ - .replace("{db_fim_table}", db_fim_table) - - print("Determing features to be processed by HAND") - df_hand = viz_db.run_sql_in_db(hand_sql) - - return df_hand - - -def copy_data_to_egis(db, origin_table, dest_table, columns, add_oid=True, add_geom_index=True, update_srid=None): +################################################################################################################################################################# +# This function loads a hucs_to_process csv file from S3 (which was generated in the first invokation of this function in the setup_huc_inundation function above.) 
+def get_branch_iteration(event): + local_data_file = os.path.join("/tmp", os.path.basename(event['args']['huc_branches_to_process'])) + S3.download_file(event['args']['data_bucket'], event['args']['huc_branches_to_process'], local_data_file) + df = pd.read_csv(local_data_file) + df['huc'] = df['huc'].astype(str).str.zfill(6) + os.remove(local_data_file) - with db.get_db_connection() as db_connection, db_connection.cursor() as cur: - cur.execute(f"DROP TABLE IF EXISTS {dest_table};") - cur.execute(f"SELECT {columns} INTO {dest_table} FROM {origin_table};") + return_object = { + "huc_branches_to_process": df.to_dict("records") + } - if add_oid: - print(f"---> Adding an OID to the {dest_table}") - cur.execute(f'ALTER TABLE {dest_table} ADD COLUMN OID SERIAL PRIMARY KEY;') - if add_geom_index and "geom" in columns: - print(f"---> Adding an spatial index to the {dest_table}") - cur.execute(f'CREATE INDEX ON {dest_table} USING GIST (geom);') # Add a spatial index - if 'geom_xy' in columns: - cur.execute(f'CREATE INDEX ON {dest_table} USING GIST (geom_xy);') # Add a spatial index to geometry point layer, if present. 
- if update_srid and "geom" in columns: - print(f"---> Updating SRID to {update_srid}") - cur.execute(f"SELECT UpdateGeometrySRID('{dest_table.split('.')[0]}', '{dest_table.split('.')[1]}', 'geom', {update_srid});") - -def refresh_fdw_schema(db, local_schema, remote_server, remote_schema): - with db.get_db_connection() as db_connection, db_connection.cursor() as cur: - sql = f""" - DROP SCHEMA IF EXISTS {local_schema} CASCADE; - CREATE SCHEMA {local_schema}; - IMPORT FOREIGN SCHEMA {remote_schema} FROM SERVER {remote_server} INTO {local_schema}; - """ - cur.execute(sql) - print(f"---> Refreshed {local_schema} foreign schema.") \ No newline at end of file + return return_object \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_fim_data_prep/templates_sql/hand_features.sql b/Core/LAMBDA/viz_functions/viz_fim_data_prep/templates_sql/hand_features.sql index 6081092c..a4601b65 100644 --- a/Core/LAMBDA/viz_functions/viz_fim_data_prep/templates_sql/hand_features.sql +++ b/Core/LAMBDA/viz_functions/viz_fim_data_prep/templates_sql/hand_features.sql @@ -1,17 +1,12 @@ -WITH feature_streamflows as ( - {streamflow_sql} -) - SELECT - crosswalk.feature_id, - CONCAT(LPAD(crosswalk.huc8::text, 8, '0'), '-', crosswalk.branch_id) as huc8_branch, - LEFT(LPAD(crosswalk.huc8::text, 8, '0'), 6) as huc, - crosswalk.hydro_id, - fs.streamflow_cms -FROM derived.fim4_featureid_crosswalk AS crosswalk -JOIN feature_streamflows fs ON fs.feature_id = crosswalk.feature_id -LEFT JOIN {db_fim_table} r2f ON r2f.feature_id = crosswalk.feature_id + fs.feature_id, + CONCAT(LPAD(fs.huc8::text, 8, '0'), '-', fs.branch) as huc8_branch, + LEFT(LPAD(fs.huc8::text, 8, '0'), 6) as huc, + fs.hydro_id, + fs.discharge_cms AS streamflow_cms --TODO: Update here and in lambda to discharge +FROM {db_fim_table}_flows fs +LEFT JOIN {db_fim_table} fim ON fim.feature_id = fs.feature_id AND fim.hydro_id = fs.hydro_id AND fim.huc8 = fs.huc8 AND fim.branch = fs.branch +LEFT JOIN 
{db_fim_table}_zero_stage zs ON zs.feature_id = fs.feature_id AND zs.hydro_id = fs.hydro_id AND zs.huc8 = fs.huc8 AND zs.branch = fs.branch WHERE - crosswalk.huc8 IS NOT NULL AND - crosswalk.lake_id = -999 AND - r2f.feature_id IS NULL \ No newline at end of file + fim.fim_version IS NULL AND + zs.rc_discharge_cms IS NULL \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_fim_data_prep/templates_sql/hand_features_for_flow_based_catfim.sql b/Core/LAMBDA/viz_functions/viz_fim_data_prep/templates_sql/hand_features_for_flow_based_catfim.sql deleted file mode 100644 index 3030a01d..00000000 --- a/Core/LAMBDA/viz_functions/viz_fim_data_prep/templates_sql/hand_features_for_flow_based_catfim.sql +++ /dev/null @@ -1,18 +0,0 @@ -WITH feature_streamflows as ( - {streamflow_sql} -) - -SELECT - crosswalk.feature_id, - CONCAT(LPAD(crosswalk.huc8::text, 8, '0'), '-', crosswalk.branch_id) as huc8_branch, - LEFT(LPAD(crosswalk.huc8::text, 8, '0'), 6) as huc, - crosswalk.hydro_id, - fs.streamflow_cms, - fs.nws_station_id -FROM derived.fim4_featureid_crosswalk AS crosswalk -JOIN feature_streamflows fs ON fs.feature_id = crosswalk.feature_id -LEFT JOIN {db_fim_table} r2f ON r2f.feature_id = crosswalk.feature_id -WHERE - crosswalk.huc8 IS NOT NULL AND - crosswalk.lake_id = -999 AND - r2f.feature_id IS NULL \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_fim_data_prep/templates_sql/hand_features_for_stage_based_catfim.sql b/Core/LAMBDA/viz_functions/viz_fim_data_prep/templates_sql/hand_features_for_stage_based_catfim.sql deleted file mode 100644 index 14602283..00000000 --- a/Core/LAMBDA/viz_functions/viz_fim_data_prep/templates_sql/hand_features_for_stage_based_catfim.sql +++ /dev/null @@ -1,18 +0,0 @@ -WITH feature_streamflows as ( - {streamflow_sql} -) - -SELECT - crosswalk.feature_id, - CONCAT(LPAD(crosswalk.huc8::text, 8, '0'), '-', crosswalk.branch_id) as huc8_branch, - LEFT(LPAD(crosswalk.huc8::text, 8, '0'), 6) as huc, - 
crosswalk.hydro_id, - fs.stage_m, - fs.nws_station_id -FROM derived.fim4_featureid_crosswalk AS crosswalk -JOIN feature_streamflows fs ON fs.feature_id = crosswalk.feature_id -LEFT JOIN {db_fim_table} r2f ON r2f.feature_id = crosswalk.feature_id -WHERE - crosswalk.huc8 IS NOT NULL AND - crosswalk.lake_id = -999 AND - r2f.feature_id IS NULL \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_fim_data_prep/templates_sql/ras2fim_insertion.sql b/Core/LAMBDA/viz_functions/viz_fim_data_prep/templates_sql/ras2fim_insertion.sql deleted file mode 100644 index aedc4225..00000000 --- a/Core/LAMBDA/viz_functions/viz_fim_data_prep/templates_sql/ras2fim_insertion.sql +++ /dev/null @@ -1,28 +0,0 @@ -WITH feature_streamflows as ( - {streamflow_sql} -) - -INSERT INTO {db_fim_table}( - hydro_id, hydro_id_str, geom, feature_id, feature_id_str, - streamflow_cfs, fim_stage_ft, max_rc_stage_ft, max_rc_discharge_cfs, - fim_version, reference_time, huc8, branch -) -SELECT - gc.feature_id as hydro_id, - gc.feature_id::TEXT as hydro_id_str, - ST_Transform(gc.geom, 3857) as geom, - gc.feature_id as feature_id, - gc.feature_id::TEXT as feature_id_str, - ROUND((fs.streamflow_cms * 35.315)::numeric, 2) as streamflow_cfs, - gc.stage_ft as fim_stage_ft, - mgc.max_rc_stage_ft, - mgc.max_rc_discharge_cfs, - CONCAT ('ras2fim_', gc.version) as fim_version, - '{reference_time}' as reference_time, - fhc.huc8, - NULL as branch -FROM ras2fim.geocurves gc -JOIN feature_streamflows fs ON fs.feature_id = gc.feature_id -JOIN derived.featureid_huc_crosswalk fhc ON fs.feature_id = fhc.feature_id -JOIN ras2fim.max_geocurves mgc ON gc.feature_id = mgc.feature_id -WHERE gc.discharge_cms >= fs.streamflow_cms AND gc.previous_discharge_cms < fs.streamflow_cms \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_fim_data_prep/templates_sql/ras2fim_insertion_for_flow_based_catfim.sql 
b/Core/LAMBDA/viz_functions/viz_fim_data_prep/templates_sql/ras2fim_insertion_for_flow_based_catfim.sql deleted file mode 100644 index 326954a4..00000000 --- a/Core/LAMBDA/viz_functions/viz_fim_data_prep/templates_sql/ras2fim_insertion_for_flow_based_catfim.sql +++ /dev/null @@ -1,29 +0,0 @@ -WITH feature_streamflows as ( - {streamflow_sql} -) - -INSERT INTO {db_fim_table}( - nws_station_id, hydro_id, hydro_id_str, geom, feature_id, feature_id_str, - streamflow_cfs, fim_stage_ft, max_rc_stage_ft, max_rc_discharge_cfs, - fim_version, reference_time, huc8, branch -) -SELECT - nws_station_id, - gc.feature_id as hydro_id, - gc.feature_id::TEXT as hydro_id_str, - ST_Transform(gc.geom, 3857) as geom, - gc.feature_id as feature_id, - gc.feature_id::TEXT as feature_id_str, - ROUND((fs.streamflow_cms * 35.315)::numeric, 2) as streamflow_cfs, - gc.stage_ft as fim_stage_ft, - mgc.max_rc_stage_ft, - mgc.max_rc_discharge_cfs, - CONCAT ('ras2fim_', gc.version) as fim_version, - '{reference_time}' as reference_time, - fhc.huc8, - NULL as branch -FROM ras2fim.geocurves gc -JOIN feature_streamflows fs ON fs.feature_id = gc.feature_id -JOIN derived.featureid_huc_crosswalk fhc ON fs.feature_id = fhc.feature_id -JOIN ras2fim.max_geocurves mgc ON gc.feature_id = mgc.feature_id -WHERE gc.discharge_cms >= fs.streamflow_cms AND gc.previous_discharge_cms < fs.streamflow_cms \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_fim_data_prep/templates_sql/ras2fim_insertion_for_stage_based_catfim.sql b/Core/LAMBDA/viz_functions/viz_fim_data_prep/templates_sql/ras2fim_insertion_for_stage_based_catfim.sql deleted file mode 100644 index 803624bf..00000000 --- a/Core/LAMBDA/viz_functions/viz_fim_data_prep/templates_sql/ras2fim_insertion_for_stage_based_catfim.sql +++ /dev/null @@ -1,28 +0,0 @@ -WITH feature_streamflows as ( - {streamflow_sql} -) - -INSERT INTO {db_fim_table}( - nws_station_id, hydro_id, hydro_id_str, geom, feature_id, feature_id_str, - fim_stage_ft, 
max_rc_stage_ft, max_rc_discharge_cfs, - fim_version, reference_time, huc8, branch -) -SELECT - nws_station_id, - gc.feature_id as hydro_id, - gc.feature_id::TEXT as hydro_id_str, - ST_Transform(gc.geom, 3857) as geom, - gc.feature_id as feature_id, - gc.feature_id::TEXT as feature_id_str, - gc.stage_ft as fim_stage_ft, - mgc.max_rc_stage_ft, - mgc.max_rc_discharge_cfs, - CONCAT ('ras2fim_', gc.version) as fim_version, - '{reference_time}' as reference_time, - fhc.huc8, - NULL as branch -FROM ras2fim.geocurves gc -JOIN feature_streamflows fs ON fs.feature_id = gc.feature_id -JOIN derived.featureid_huc_crosswalk fhc ON fs.feature_id = fhc.feature_id -JOIN ras2fim.max_geocurves mgc ON gc.feature_id = mgc.feature_id -WHERE gc.stage_m >= fs.stage_m AND gc.previous_stage_m < fs.stage_m \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/analysis_assim/ana_inundation.yml b/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/analysis_assim/ana_inundation.yml index da515327..c224e8aa 100644 --- a/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/analysis_assim/ana_inundation.yml +++ b/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/analysis_assim/ana_inundation.yml @@ -1,6 +1,7 @@ product: ana_inundation configuration: analysis_assim product_type: "fim" +domain: conus run: true ingest_files: @@ -19,6 +20,7 @@ db_max_flows: fim_configs: - name: ana_inundation + flows_table: cache.max_flows_ana target_table: ingest.ana_inundation fim_type: hand postprocess: diff --git a/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/analysis_assim/ana_past_14day_max_inundation.yml b/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/analysis_assim/ana_past_14day_max_inundation.yml index 30feca3c..0fd08c87 100644 --- a/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/analysis_assim/ana_past_14day_max_inundation.yml +++ 
b/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/analysis_assim/ana_past_14day_max_inundation.yml @@ -1,6 +1,7 @@ product: ana_past_14day_max_inundation configuration: analysis_assim product_type: "fim" +domain: conus run: true run_times: - '00:00' @@ -35,12 +36,14 @@ db_max_flows: fim_configs: - name: ana_past_7day_max_inundation + flows_table: cache.max_flows_ana_7day target_table: ingest.ana_past_7day_max_inundation fim_type: hand postprocess: sql_file: ana_past_7day_max_inundation target_table: publish.ana_past_7day_max_inundation - name: ana_past_14day_max_inundation + flows_table: cache.max_flows_ana_14day target_table: ingest.ana_past_14day_max_inundation fim_type: hand postprocess: diff --git a/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/analysis_assim_hawaii/ana_inundation_hi.yml b/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/analysis_assim_hawaii/ana_inundation_hi.yml index 4e6ea752..083de058 100644 --- a/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/analysis_assim_hawaii/ana_inundation_hi.yml +++ b/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/analysis_assim_hawaii/ana_inundation_hi.yml @@ -1,6 +1,7 @@ product: ana_inundation_hi configuration: analysis_assim_hawaii product_type: "fim" +domain: hi run: true ingest_files: @@ -19,6 +20,7 @@ db_max_flows: fim_configs: - name: ana_inundation_hi + flows_table: cache.max_flows_ana_hi target_table: ingest.ana_inundation_hi fim_type: hand postprocess: diff --git a/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/analysis_assim_puertorico/ana_inundation_prvi.yml b/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/analysis_assim_puertorico/ana_inundation_prvi.yml index 4a6441e8..bf9957fa 100644 --- a/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/analysis_assim_puertorico/ana_inundation_prvi.yml +++ 
b/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/analysis_assim_puertorico/ana_inundation_prvi.yml @@ -1,6 +1,7 @@ product: ana_inundation_prvi configuration: analysis_assim_puertorico product_type: "fim" +domain: prvi run: true ingest_files: @@ -19,6 +20,7 @@ db_max_flows: fim_configs: - name: ana_inundation_prvi + flows_table: cache.max_flows_ana_prvi target_table: ingest.ana_inundation_prvi fim_type: hand postprocess: diff --git a/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/medium_range_alaska_mem1/mrf_gfs_10day_peak_flow_arrival_time_ak.yml b/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/medium_range_alaska_mem1/mrf_gfs_10day_peak_flow_arrival_time_ak.yml index cb1a61a6..50b7caab 100644 --- a/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/medium_range_alaska_mem1/mrf_gfs_10day_peak_flow_arrival_time_ak.yml +++ b/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/medium_range_alaska_mem1/mrf_gfs_10day_peak_flow_arrival_time_ak.yml @@ -11,11 +11,11 @@ ingest_files: target_keys: (feature_id, streamflow) db_max_flows: - - name: mrf_gfs_max_flows_ak - target_table: cache.mrf_gfs_max_flows_ak + - name: mrf_gfs_10day_max_flows_ak + target_table: cache.mrf_gfs_10day_max_flows_ak target_keys: (feature_id, streamflow) method: database - max_flows_sql_file: mrf_gfs_max_flows_ak + max_flows_sql_file: mrf_gfs_10day_max_flows_ak postprocess_sql: - sql_file: mrf_gfs_10day_peak_flow_arrival_time_alaska diff --git a/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/medium_range_blend/mrf_nbm_10day_max_high_flow_magnitude.yml b/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/medium_range_blend/mrf_nbm_10day_max_high_flow_magnitude.yml index e7d48c0b..f91893d6 100644 --- a/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/medium_range_blend/mrf_nbm_10day_max_high_flow_magnitude.yml +++ 
b/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/medium_range_blend/mrf_nbm_10day_max_high_flow_magnitude.yml @@ -12,11 +12,21 @@ ingest_files: dependent_on: publish.mrf_gfs_max_inundation_10day_hucs # this will pause the pipeline until this table is updated, causing nbm to run after gfs (instead of at the same time) db_max_flows: - - name: mrf_nbm_max_flows - target_table: cache.max_flows_mrf_nbm + - name: mrf_nbm_3day_max_flows + target_table: cache.max_flows_mrf_nbm_3day target_keys: (feature_id, streamflow) method: database - max_flows_sql_file: mrf_nbm_max_flows + max_flows_sql_file: mrf_nbm_3day_max_flows + - name: mrf_nbm_5day_max_flows + target_table: cache.max_flows_mrf_nbm_5day + target_keys: (feature_id, streamflow) + method: database + max_flows_sql_file: mrf_nbm_5day_max_flows + - name: mrf_nbm_10day_max_flows + target_table: cache.max_flows_mrf_nbm_10day + target_keys: (feature_id, streamflow) + method: database + max_flows_sql_file: mrf_nbm_10day_max_flows postprocess_sql: - sql_file: mrf_nbm_10day_max_high_flow_magnitude diff --git a/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/medium_range_blend/mrf_nbm_10day_max_inundation.yml b/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/medium_range_blend/mrf_nbm_10day_max_inundation.yml index 3e667e76..93430b59 100644 --- a/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/medium_range_blend/mrf_nbm_10day_max_inundation.yml +++ b/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/medium_range_blend/mrf_nbm_10day_max_inundation.yml @@ -1,6 +1,7 @@ product: mrf_nbm_10day_max_inundation configuration: medium_range_blend product_type: "fim" +domain: conus run: true ingest_files: @@ -12,26 +13,39 @@ ingest_files: dependent_on: publish.mrf_gfs_max_inundation_10day_hucs # this will pause the pipeline until this table is updated, causing nbm to run after gfs (instead of at the same time) db_max_flows: - - name: 
mrf_nbm_max_flows - target_table: cache.max_flows_mrf_nbm + - name: mrf_nbm_3day_max_flows + target_table: cache.max_flows_mrf_nbm_3day target_keys: (feature_id, streamflow) method: database - max_flows_sql_file: mrf_nbm_max_flows + max_flows_sql_file: mrf_nbm_3day_max_flows + - name: mrf_nbm_5day_max_flows + target_table: cache.max_flows_mrf_nbm_5day + target_keys: (feature_id, streamflow) + method: database + max_flows_sql_file: mrf_nbm_5day_max_flows + - name: mrf_nbm_10day_max_flows + target_table: cache.max_flows_mrf_nbm_10day + target_keys: (feature_id, streamflow) + method: database + max_flows_sql_file: mrf_nbm_10day_max_flows fim_configs: - name: mrf_nbm_max_inundation_3day + flows_table: cache.max_flows_mrf_nbm_3day target_table: ingest.mrf_nbm_max_inundation_3day fim_type: hand postprocess: sql_file: mrf_nbm_max_inundation_3day target_table: publish.mrf_nbm_max_inundation_3day - name: mrf_nbm_max_inundation_5day + flows_table: cache.max_flows_mrf_nbm_5day target_table: ingest.mrf_nbm_max_inundation_5day fim_type: hand postprocess: sql_file: mrf_nbm_max_inundation_5day target_table: publish.mrf_nbm_max_inundation_5day - name: mrf_nbm_max_inundation_10day + flows_table: cache.max_flows_mrf_nbm_10day target_table: ingest.mrf_nbm_max_inundation_10day fim_type: hand postprocess: diff --git a/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/medium_range_blend/mrf_nbm_10day_peak_flow_arrival_time.yml b/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/medium_range_blend/mrf_nbm_10day_peak_flow_arrival_time.yml index 43b15f88..d92bda0f 100644 --- a/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/medium_range_blend/mrf_nbm_10day_peak_flow_arrival_time.yml +++ b/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/medium_range_blend/mrf_nbm_10day_peak_flow_arrival_time.yml @@ -12,11 +12,11 @@ ingest_files: dependent_on: publish.mrf_gfs_max_inundation_10day_hucs # this will pause the pipeline until 
this table is updated, causing nbm to run after gfs (instead of at the same time) db_max_flows: - - name: mrf_nbm_max_flows - target_table: cache.max_flows_mrf_nbm + - name: mrf_nbm_10day_max_flows + target_table: cache.max_flows_mrf_nbm_10day target_keys: (feature_id, streamflow) method: database - max_flows_sql_file: mrf_nbm_max_flows + max_flows_sql_file: mrf_nbm_10day_max_flows postprocess_sql: - sql_file: mrf_nbm_10day_peak_flow_arrival_time diff --git a/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/medium_range_blend/mrf_nbm_10day_rapid_onset_flooding.yml b/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/medium_range_blend/mrf_nbm_10day_rapid_onset_flooding.yml index 96c7c974..0e3a17fa 100644 --- a/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/medium_range_blend/mrf_nbm_10day_rapid_onset_flooding.yml +++ b/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/medium_range_blend/mrf_nbm_10day_rapid_onset_flooding.yml @@ -12,11 +12,11 @@ ingest_files: dependent_on: publish.mrf_gfs_max_inundation_10day_hucs # this will pause the pipeline until this table is updated, causing nbm to run after gfs (instead of at the same time) db_max_flows: - - name: mrf_nbm_max_flows - target_table: cache.max_flows_mrf_nbm + - name: mrf_nbm_10day_max_flows + target_table: cache.max_flows_mrf_nbm_10day target_keys: (feature_id, streamflow) method: database - max_flows_sql_file: mrf_nbm_max_flows + max_flows_sql_file: mrf_nbm_10day_max_flows postprocess_sql: - sql_file: mrf_nbm_10day_rapid_onset_flooding diff --git a/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/medium_range_blend_alaska/mrf_nbm_10day_peak_flow_arrival_time_ak.yml b/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/medium_range_blend_alaska/mrf_nbm_10day_peak_flow_arrival_time_ak.yml index d7370d64..40b4459e 100644 --- 
a/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/medium_range_blend_alaska/mrf_nbm_10day_peak_flow_arrival_time_ak.yml +++ b/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/medium_range_blend_alaska/mrf_nbm_10day_peak_flow_arrival_time_ak.yml @@ -11,13 +11,12 @@ ingest_files: target_keys: (feature_id, streamflow) dependent_on: publish.mrf_gfs_10day_peak_flow_arrival_time_alaska # this will pause the pipeline until this table is updated, causing nbm to run after gfs (instead of at the same time) - db_max_flows: - - name: mrf_nbm_max_flows_ak - target_table: cache.mrf_nbm_max_flows_ak + - name: mrf_nbm_10day_max_flows_ak + target_table: cache.mrf_nbm_10day_max_flows_ak target_keys: (feature_id, streamflow) method: database - max_flows_sql_file: mrf_nbm_max_flows_ak + max_flows_sql_file: mrf_nbm_10day_max_flows_ak postprocess_sql: - sql_file: mrf_nbm_10day_peak_flow_arrival_time_alaska diff --git a/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/medium_range_ensemble/mrf_gfs_5day_max_inundation_probability.yml b/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/medium_range_ensemble/mrf_gfs_5day_max_inundation_probability.yml new file mode 100644 index 00000000..698626fa --- /dev/null +++ b/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/medium_range_ensemble/mrf_gfs_5day_max_inundation_probability.yml @@ -0,0 +1,96 @@ +product: mrf_gfs_5day_max_inundation_probability +configuration: medium_range_ensemble +product_type: "fim" +domain: conus +run: true + +ingest_files: + - file_format: common/data/model/com/nwm/{{variable:NWM_DATAFLOW_VERSION}}/nwm.{{datetime:%Y%m%d}}/medium_range_mem2/nwm.t{{datetime:%H}}z.medium_range.channel_rt_2.f{{range:3,121,3,%03d}}.conus.nc + file_step: None + file_window: None + target_table: ingest.nwm_channel_rt_mrf_gfs_mem2 + target_keys: (feature_id, streamflow) + - file_format: 
common/data/model/com/nwm/{{variable:NWM_DATAFLOW_VERSION}}/nwm.{{datetime:%Y%m%d}}/medium_range_mem3/nwm.t{{datetime:%H}}z.medium_range.channel_rt_3.f{{range:3,121,3,%03d}}.conus.nc + file_step: None + file_window: None + target_table: ingest.nwm_channel_rt_mrf_gfs_mem3 + target_keys: (feature_id, streamflow) + - file_format: common/data/model/com/nwm/{{variable:NWM_DATAFLOW_VERSION}}/nwm.{{datetime:%Y%m%d}}/medium_range_mem4/nwm.t{{datetime:%H}}z.medium_range.channel_rt_4.f{{range:3,121,3,%03d}}.conus.nc + file_step: None + file_window: None + target_table: ingest.nwm_channel_rt_mrf_gfs_mem4 + target_keys: (feature_id, streamflow) + - file_format: common/data/model/com/nwm/{{variable:NWM_DATAFLOW_VERSION}}/nwm.{{datetime:%Y%m%d}}/medium_range_mem5/nwm.t{{datetime:%H}}z.medium_range.channel_rt_5.f{{range:3,121,3,%03d}}.conus.nc + file_step: None + file_window: None + target_table: ingest.nwm_channel_rt_mrf_gfs_mem5 + target_keys: (feature_id, streamflow) + - file_format: common/data/model/com/nwm/{{variable:NWM_DATAFLOW_VERSION}}/nwm.{{datetime:%Y%m%d}}/medium_range_mem6/nwm.t{{datetime:%H}}z.medium_range.channel_rt_6.f{{range:3,121,3,%03d}}.conus.nc + file_step: None + file_window: None + target_table: ingest.nwm_channel_rt_mrf_gfs_mem6 + target_keys: (feature_id, streamflow) + +db_max_flows: + - name: mrf_gfs_mem2_5day_max_flows + target_table: cache.max_flows_mrf_gfs_mem2_5day + target_keys: (feature_id, streamflow) + method: database + max_flows_sql_file: mrf_gfs_mem2_5day_max_flows + - name: mrf_gfs_mem3_5day_max_flows + target_table: cache.max_flows_mrf_gfs_mem3_5day + target_keys: (feature_id, streamflow) + method: database + max_flows_sql_file: mrf_gfs_mem3_5day_max_flows + - name: mrf_gfs_mem4_5day_max_flows + target_table: cache.max_flows_mrf_gfs_mem4_5day + target_keys: (feature_id, streamflow) + method: database + max_flows_sql_file: mrf_gfs_mem4_5day_max_flows + - name: mrf_gfs_mem5_5day_max_flows + target_table: cache.max_flows_mrf_gfs_mem5_5day + 
target_keys: (feature_id, streamflow) + method: database + max_flows_sql_file: mrf_gfs_mem5_5day_max_flows + - name: mrf_gfs_mem6_5day_max_flows + target_table: cache.max_flows_mrf_gfs_mem6_5day + target_keys: (feature_id, streamflow) + method: database + max_flows_sql_file: mrf_gfs_mem6_5day_max_flows + +fim_configs: + - name: mrf_gfs_mem2_max_inundation_5day + flows_table: cache.max_flows_mrf_gfs_mem2_5day + target_table: ingest.mrf_gfs_mem2_max_inundation_5day + fim_type: hand + postprocess: + sql_file: mrf_gfs_mem2_max_inundation_5day + target_table: publish.mrf_gfs_mem2_max_inundation_5day + - name: mrf_gfs_mem3_max_inundation_5day + flows_table: cache.max_flows_mrf_gfs_mem3_5day + target_table: ingest.mrf_gfs_mem3_max_inundation_5day + fim_type: hand + postprocess: + sql_file: mrf_gfs_mem3_max_inundation_5day + target_table: publish.mrf_gfs_mem3_max_inundation_5day + - name: mrf_gfs_mem4_max_inundation_5day + flows_table: cache.max_flows_mrf_gfs_mem4_5day + target_table: ingest.mrf_gfs_mem4_max_inundation_5day + fim_type: hand + postprocess: + sql_file: mrf_gfs_mem4_max_inundation_5day + target_table: publish.mrf_gfs_mem4_max_inundation_5day + - name: mrf_gfs_mem5_max_inundation_5day + flows_table: cache.max_flows_mrf_gfs_mem5_5day + target_table: ingest.mrf_gfs_mem5_max_inundation_5day + fim_type: hand + postprocess: + sql_file: mrf_gfs_mem5_max_inundation_5day + target_table: publish.mrf_gfs_mem5_max_inundation_5day + - name: mrf_gfs_mem6_max_inundation_5day + flows_table: cache.max_flows_mrf_gfs_mem6_5day + target_table: ingest.mrf_gfs_mem6_max_inundation_5day + fim_type: hand + postprocess: + sql_file: mrf_gfs_mem6_max_inundation_5day + target_table: publish.mrf_gfs_mem6_max_inundation_5day \ No newline at end of file diff --git a/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/medium_range_mem1/mrf_gfs_10day_max_high_flow_magnitude.yml 
b/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/medium_range_mem1/mrf_gfs_10day_max_high_flow_magnitude.yml index 190252fc..b8790be3 100644 --- a/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/medium_range_mem1/mrf_gfs_10day_max_high_flow_magnitude.yml +++ b/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/medium_range_mem1/mrf_gfs_10day_max_high_flow_magnitude.yml @@ -11,11 +11,21 @@ ingest_files: target_keys: (feature_id, streamflow) db_max_flows: - - name: mrf_gfs_max_flows - target_table: cache.max_flows_mrf_gfs + - name: mrf_gfs_3day_max_flows + target_table: cache.max_flows_mrf_gfs_3day target_keys: (feature_id, streamflow) method: database - max_flows_sql_file: mrf_gfs_max_flows + max_flows_sql_file: mrf_gfs_3day_max_flows + - name: mrf_gfs_5day_max_flows + target_table: cache.max_flows_mrf_gfs_5day + target_keys: (feature_id, streamflow) + method: database + max_flows_sql_file: mrf_gfs_5day_max_flows + - name: mrf_gfs_10day_max_flows + target_table: cache.max_flows_mrf_gfs_10day + target_keys: (feature_id, streamflow) + method: database + max_flows_sql_file: mrf_gfs_10day_max_flows postprocess_sql: - sql_file: mrf_gfs_10day_max_high_flow_magnitude diff --git a/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/medium_range_mem1/mrf_gfs_10day_max_inundation.yml b/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/medium_range_mem1/mrf_gfs_10day_max_inundation.yml index f2931242..177e6b2b 100644 --- a/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/medium_range_mem1/mrf_gfs_10day_max_inundation.yml +++ b/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/medium_range_mem1/mrf_gfs_10day_max_inundation.yml @@ -1,6 +1,7 @@ product: mrf_gfs_10day_max_inundation configuration: medium_range_mem1 product_type: "fim" +domain: conus run: true ingest_files: @@ -11,26 +12,39 @@ ingest_files: target_keys: (feature_id, streamflow) db_max_flows: - - name: 
mrf_gfs_max_flows - target_table: cache.max_flows_mrf_gfs + - name: mrf_gfs_3day_max_flows + target_table: cache.max_flows_mrf_gfs_3day target_keys: (feature_id, streamflow) method: database - max_flows_sql_file: mrf_gfs_max_flows + max_flows_sql_file: mrf_gfs_3day_max_flows + - name: mrf_gfs_5day_max_flows + target_table: cache.max_flows_mrf_gfs_5day + target_keys: (feature_id, streamflow) + method: database + max_flows_sql_file: mrf_gfs_5day_max_flows + - name: mrf_gfs_10day_max_flows + target_table: cache.max_flows_mrf_gfs_10day + target_keys: (feature_id, streamflow) + method: database + max_flows_sql_file: mrf_gfs_10day_max_flows fim_configs: - name: mrf_gfs_max_inundation_3day + flows_table: cache.max_flows_mrf_gfs_3day target_table: ingest.mrf_gfs_max_inundation_3day fim_type: hand postprocess: sql_file: mrf_gfs_max_inundation_3day target_table: publish.mrf_gfs_max_inundation_3day - name: mrf_gfs_max_inundation_5day + flows_table: cache.max_flows_mrf_gfs_5day target_table: ingest.mrf_gfs_max_inundation_5day fim_type: hand postprocess: sql_file: mrf_gfs_max_inundation_5day target_table: publish.mrf_gfs_max_inundation_5day - name: mrf_gfs_max_inundation_10day + flows_table: cache.max_flows_mrf_gfs_10day target_table: ingest.mrf_gfs_max_inundation_10day fim_type: hand postprocess: diff --git a/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/medium_range_mem1/mrf_gfs_10day_peak_flow_arrival_time.yml b/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/medium_range_mem1/mrf_gfs_10day_peak_flow_arrival_time.yml index b070db40..aa737697 100644 --- a/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/medium_range_mem1/mrf_gfs_10day_peak_flow_arrival_time.yml +++ b/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/medium_range_mem1/mrf_gfs_10day_peak_flow_arrival_time.yml @@ -11,11 +11,11 @@ ingest_files: target_keys: (feature_id, streamflow) db_max_flows: - - name: mrf_gfs_max_flows - target_table: 
cache.max_flows_mrf_gfs + - name: mrf_gfs_10day_max_flows + target_table: cache.max_flows_mrf_gfs_10day target_keys: (feature_id, streamflow) method: database - max_flows_sql_file: mrf_gfs_max_flows + max_flows_sql_file: mrf_gfs_10day_max_flows postprocess_sql: - sql_file: mrf_gfs_10day_peak_flow_arrival_time diff --git a/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/medium_range_mem1/mrf_gfs_10day_rapid_onset_flooding.yml b/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/medium_range_mem1/mrf_gfs_10day_rapid_onset_flooding.yml index a5dd8b1b..5f31c144 100644 --- a/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/medium_range_mem1/mrf_gfs_10day_rapid_onset_flooding.yml +++ b/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/medium_range_mem1/mrf_gfs_10day_rapid_onset_flooding.yml @@ -11,11 +11,11 @@ ingest_files: target_keys: (feature_id, streamflow) db_max_flows: - - name: mrf_gfs_max_flows - target_table: cache.max_flows_mrf_gfs + - name: mrf_gfs_10day_max_flows + target_table: cache.max_flows_mrf_gfs_10day target_keys: (feature_id, streamflow) method: database - max_flows_sql_file: mrf_gfs_max_flows + max_flows_sql_file: mrf_gfs_10day_max_flows postprocess_sql: - sql_file: mrf_gfs_10day_rapid_onset_flooding diff --git a/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/reference/static_flow_based_catfim.yml b/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/reference/static_flow_based_catfim_deprecated.yml similarity index 100% rename from Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/reference/static_flow_based_catfim.yml rename to Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/reference/static_flow_based_catfim_deprecated.yml diff --git a/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/reference/static_stage_based_catfim.yml 
b/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/reference/static_stage_based_catfim_deprecated.yml similarity index 100% rename from Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/reference/static_stage_based_catfim.yml rename to Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/reference/static_stage_based_catfim_deprecated.yml diff --git a/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/replace_route/rfc_based_5day_max_inundation.yml b/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/replace_route/rfc_based_5day_max_inundation.yml index 11039b32..61a67596 100644 --- a/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/replace_route/rfc_based_5day_max_inundation.yml +++ b/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/replace_route/rfc_based_5day_max_inundation.yml @@ -1,10 +1,12 @@ product: rfc_based_5day_max_inundation configuration: replace_route product_type: "fim" +domain: conus run: true fim_configs: - name: rfc_based_5day_max_inundation + flows_table: cache.max_flows_rnr target_table: ingest.rfc_based_5day_max_inundation fim_type: hand postprocess: diff --git a/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/short_range/srf_18hr_max_inundation.yml b/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/short_range/srf_18hr_max_inundation.yml index 37d901f2..9bb3fa9d 100644 --- a/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/short_range/srf_18hr_max_inundation.yml +++ b/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/short_range/srf_18hr_max_inundation.yml @@ -1,6 +1,7 @@ product: srf_18hr_max_inundation configuration: short_range product_type: "fim" +domain: conus run: true ingest_files: @@ -19,6 +20,7 @@ db_max_flows: fim_configs: - name: srf_18hr_max_inundation + flows_table: cache.max_flows_srf target_table: ingest.srf_18hr_max_inundation fim_type: hand postprocess: 
diff --git a/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/short_range_hawaii/srf_48hr_max_inundation_hi.yml b/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/short_range_hawaii/srf_48hr_max_inundation_hi.yml index b3203eab..49274390 100644 --- a/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/short_range_hawaii/srf_48hr_max_inundation_hi.yml +++ b/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/short_range_hawaii/srf_48hr_max_inundation_hi.yml @@ -1,6 +1,7 @@ product: srf_48hr_max_inundation_hi configuration: short_range_hawaii product_type: "fim" +domain: hi run: true ingest_files: @@ -19,6 +20,7 @@ db_max_flows: fim_configs: - name: srf_48hr_max_inundation_hi + flows_table: cache.max_flows_srf_hi target_table: ingest.srf_48hr_max_inundation_hi fim_type: hand postprocess: diff --git a/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/short_range_puertorico/srf_48hr_max_inundation_prvi.yml b/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/short_range_puertorico/srf_48hr_max_inundation_prvi.yml index ed69c4d0..77bbfeca 100644 --- a/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/short_range_puertorico/srf_48hr_max_inundation_prvi.yml +++ b/Core/LAMBDA/viz_functions/viz_initialize_pipeline/product_configs/short_range_puertorico/srf_48hr_max_inundation_prvi.yml @@ -1,6 +1,7 @@ product: srf_48hr_max_inundation_prvi configuration: short_range_puertorico product_type: "fim" +domain: prvi run: true ingest_files: @@ -19,6 +20,7 @@ db_max_flows: fim_configs: - name: srf_48hr_max_inundation_prvi + flows_table: cache.max_flows_srf_prvi target_table: ingest.srf_48hr_max_inundation_prvi fim_type: hand postprocess: diff --git a/Core/StepFunctions/viz_processing_pipeline.json.tftpl b/Core/StepFunctions/viz_processing_pipeline.json.tftpl index 3ba36246..bc072367 100644 --- a/Core/StepFunctions/viz_processing_pipeline.json.tftpl +++ 
b/Core/StepFunctions/viz_processing_pipeline.json.tftpl @@ -795,7 +795,124 @@ "Next": "Process Coastal (SCHISM) FIM" } ], - "Default": "FIM Data Preparation" + "Default": "FIM Cache - Save Flows to Redshift" + }, + "FIM Cache - Save Flows to Redshift": { + "Type": "Task", + "Resource": "arn:aws:states:::lambda:invoke", + "Parameters": { + "FunctionName": "${db_postprocess_sql_arn}", + "Payload": { + "args.$": "$", + "step": "hand_pre_processing", + "folder": "fim_caching_templates", + "sql_templates_to_run": [ + { + "sql_file": "0b_rds_create_inundation_tables_if_not_exist", + "db_type": "viz", + "check_dependencies": false + }, + { + "sql_file": "0a_redshift_create_inundation_tables_if_not_exist", + "db_type": "redshift" + }, + { + "sql_file": "1a_rds_build_inundation_flows_table", + "db_type": "viz" + }, + { + "sql_file": "1b_redshift_copy_inundation_flows", + "db_type": "redshift" + } + ] + } + }, + "Retry": [ + { + "ErrorEquals": [ + "Lambda.ServiceException", + "Lambda.AWSLambdaException", + "Lambda.SdkClientException", + "Lambda.TooManyRequestsException" + ], + "IntervalSeconds": 1, + "MaxAttempts": 3, + "BackoffRate": 2 + } + ], + "Next": "FIM Cache - Build FIM Table on Redshift", + "ResultPath": null + }, + "FIM Cache - Build FIM Table on Redshift": { + "Type": "Task", + "Resource": "arn:aws:states:::lambda:invoke", + "Parameters": { + "Payload": { + "args.$": "$", + "step": "hand_pre_processing", + "folder": "fim_caching_templates", + "sql_templates_to_run": [ + { + "sql_file": "2a_redshift_query_cached_fim_table", + "db_type": "redshift" + } + ] + }, + "FunctionName": "${db_postprocess_sql_arn}" + }, + "Retry": [ + { + "ErrorEquals": [ + "Lambda.ServiceException", + "Lambda.AWSLambdaException", + "Lambda.SdkClientException", + "Lambda.TooManyRequestsException" + ], + "IntervalSeconds": 1, + "MaxAttempts": 3, + "BackoffRate": 2 + } + ], + "Next": "FIM Cache - Load Cached FIM Into Ingest Tables", + "ResultPath": null + }, + "FIM Cache - Load Cached FIM Into 
Ingest Tables": { + "Type": "Task", + "Resource": "arn:aws:states:::lambda:invoke", + "Parameters": { + "Payload": { + "args.$": "$", + "step": "hand_pre_processing", + "folder": "fim_caching_templates", + "sql_templates_to_run": [ + { + "sql_file": "3a_rds_ras2fim_insertion", + "db_type": "viz" + }, + { + "sql_file": "3b_rds_cached_hand_insertion", + "db_type": "viz", + "check_dependencies": false + } + ] + }, + "FunctionName": "${db_postprocess_sql_arn}" + }, + "Retry": [ + { + "ErrorEquals": [ + "Lambda.ServiceException", + "Lambda.AWSLambdaException", + "Lambda.SdkClientException", + "Lambda.TooManyRequestsException" + ], + "IntervalSeconds": 1, + "MaxAttempts": 3, + "BackoffRate": 2 + } + ], + "Next": "FIM Data Preparation", + "ResultPath": null }, "Process Coastal (SCHISM) FIM": { "Type": "Task", @@ -845,7 +962,7 @@ } }, "ItemsPath": "$.huc_processing_payload.hucs_to_process", - "Next": "Postprocess SQL - FIM Config", + "Next": "FIM Cache - Add Processed FIM to Cache", "ItemSelector": { "huc_branches_to_process.$": "$$.Map.Item.Value", "data_bucket.$": "$.huc_processing_payload.data_bucket", @@ -858,6 +975,40 @@ "MaxConcurrency": 4, "ResultPath": null }, + "FIM Cache - Add Processed FIM to Cache": { + "Type": "Task", + "Resource": "arn:aws:states:::lambda:invoke", + "Parameters": { + "FunctionName": "${db_postprocess_sql_arn}", + "Payload": { + "args.$": "$", + "step": "hand_post_processing", + "folder": "fim_caching_templates", + "sql_templates_to_run": [ + { + "sql_file": "5a_redshift_cache_fim_from_rds", + "db_type": "redshift", + "check_dependencies": false + } + ] + } + }, + "Retry": [ + { + "ErrorEquals": [ + "Lambda.ServiceException", + "Lambda.AWSLambdaException", + "Lambda.SdkClientException", + "Lambda.TooManyRequestsException" + ], + "IntervalSeconds": 1, + "MaxAttempts": 3, + "BackoffRate": 2 + } + ], + "Next": "Postprocess SQL - FIM Config", + "ResultPath": null + },
"Postprocess SQL - FIM Config": { "Type": "Task", "Resource": "arn:aws:states:::lambda:invoke",