diff --git a/.gitignore b/.gitignore
index 89fa2675..bbeb945f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -23,3 +23,16 @@ tests/.coverage
 .vscode/launch.json
 data/sql/counties_database.db
 data/sql/msa_database.db
+.Rproj.user
+**/*.RData
+**/*.Rhistory
+
+# data
+data/minneapolis/processed/values_long.csv
+data/minneapolis/processed/values_with_parking.csv
+data/minneapolis/sourced/demographic/**
+data/minneapolis/preds/**
+data/minneapolis/sourced/parcel_to_census_tract_mappings/**
+data/minneapolis/sourced/parcel_to_parking_info_mappings/**
+
+data/minneapolis/.pgpass
diff --git a/cities/modeling/modeling_utils.py b/cities/modeling/modeling_utils.py
index 966a0ba5..30ea4d48 100644
--- a/cities/modeling/modeling_utils.py
+++ b/cities/modeling/modeling_utils.py
@@ -68,8 +68,8 @@ def prep_wide_data_for_inference(
     4. Loads the required transformed features.
     5. Merges fixed covariates into a joint dataframe based on a common ID column.
     6. Ensures that the GeoFIPS (geographical identifier) is consistent across datasets.
-    7. Extracts common years for which both intervention and outcome data are available.
-    8. Shifts the outcome variable forward by the specified number of time steps.
+    7. Shifts the outcome variable forward by the number of time steps given by forward_shift.
+    8. Extracts common years for which both intervention and outcome data are available.
     9. Prepares tensors for input features (x), interventions (t), and outcomes (y).
     10. Creates indices for states and units, preparing them as tensors.
     11. Validates the shapes of the tensors.
@@ -124,50 +124,25 @@
     assert f_covariates_joint["GeoFIPS"].equals(intervention["GeoFIPS"])
 
-    # extract data for which intervention and outcome overlap
-    year_min = max(
-        intervention.columns[2:].astype(int).min(),
-        outcome.columns[2:].astype(int).min(),
-    )
-
-    year_max = min(
-        intervention.columns[2:].astype(int).max(),
-        outcome.columns[2:].astype(int).max(),
-    )
-
-    assert all(intervention["GeoFIPS"] == outcome["GeoFIPS"])
-
+    # years of the downstream (outcome) variable that can be paired
+    # with an intervention year forward_shift steps earlier
    outcome_years_to_keep = [
         year
         for year in outcome.columns[2:]
-        if year_min <= int(year) <= year_max + forward_shift
+        if str(int(year) - forward_shift) in intervention.columns[2:]
     ]
 
-    outcome_years_to_keep = [
-        year for year in outcome_years_to_keep if year in intervention.columns[2:]
-    ]
-
-    outcome = outcome[outcome_years_to_keep]
-
-    # shift outcome `forward_shift` steps ahead
-    # for the prediction task
-    outcome_shifted = outcome.copy()
-
-    for i in range(len(outcome_years_to_keep) - forward_shift):
-        outcome_shifted.iloc[:, i] = outcome_shifted.iloc[:, i + forward_shift]
-
-    years_to_drop = [
-        f"{year}" for year in range(year_max - forward_shift + 1, year_max + 1)
-    ]
-    outcome_shifted.drop(columns=years_to_drop, inplace=True)
-
+    # extract data for which intervention and outcome overlap
+    outcome.drop(columns=["GeoFIPS", "GeoName"], inplace=True)
     intervention.drop(columns=["GeoFIPS", "GeoName"], inplace=True)
 
-    intervention = intervention[outcome_shifted.columns]
+    outcome_shifted = outcome.rename(lambda x: str(int(x) - forward_shift), axis=1)
+    years_available = [
+        year for year in intervention.columns if year in outcome_shifted.columns
+    ]
+    intervention = intervention[years_available]
+    outcome_shifted = outcome_shifted[years_available]
 
     assert intervention.shape == outcome_shifted.shape
 
-    years_available = outcome_shifted.columns.astype(int).values
-
     unit_index = pd.factorize(f_covariates_joint["GeoFIPS"].values)[0]
     state_index = pd.factorize(f_covariates_joint["GeoFIPS"].values // 1000)[0]
@@ -197,12 +172,13 @@ def prep_wide_data_for_inference(
 
     model_args = (N_t, N_cov, N_s, N_u, state_index, unit_index)
 
+    int_year_available = [int(year) for year in years_available]
     return {
         "model_args": model_args,
         "x": x,
         "t": t,
         "y": y,
-        "years_available": years_available,
+        "years_available": int_year_available,
         "outcome_years": outcome_years_to_keep,
         "covariates_df": f_covariates_joint,
     }
@@ -222,7 +198,10 @@ def train_interactions_model(
     guide = AutoNormal(conditioned_model)
 
     svi = SVI(
-        model=conditioned_model, guide=guide, optim=Adam({"lr": lr}), loss=Trace_ELBO()
+        model=conditioned_model,
+        guide=guide,
+        optim=Adam({"lr": lr}),  # type: ignore
+        loss=Trace_ELBO(),
     )
 
     losses = []
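For orientation, the refactor above replaces the index-based copy loop with a single column rename: outcome columns are relabeled back by forward_shift years, so that column "2001" of outcome_shifted holds the outcome observed in 2001 + forward_shift, and intersecting the relabeled columns with the intervention's columns yields exactly the years usable for training. A minimal self-contained sketch of this technique, using hypothetical toy frames rather than the repository's data:

import pandas as pd

forward_shift = 2

# hypothetical wide-format frames: one string-named column per year
intervention = pd.DataFrame({"2001": [0.1, 0.2], "2002": [0.3, 0.4], "2003": [0.5, 0.6]})
outcome = pd.DataFrame({"2001": [1.0, 2.0], "2002": [3.0, 4.0], "2003": [5.0, 6.0]})

# relabel outcome columns back by forward_shift: column "2001" of
# outcome_shifted now holds the outcome observed in 2003
outcome_shifted = outcome.rename(lambda x: str(int(x) - forward_shift), axis=1)

# keep only years present in both frames, in the intervention's column order
years_available = [
    year for year in intervention.columns if year in outcome_shifted.columns
]
intervention = intervention[years_available]
outcome_shifted = outcome_shifted[years_available]

assert intervention.shape == outcome_shifted.shape
print(years_available)  # ['2001']: only 2001 has an outcome observed 2 years later

Compared with the removed loop-and-drop logic, the rename keeps the year bookkeeping in the column labels themselves, so the overlap computation and the shift cannot drift out of sync.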
diff --git a/cities/modeling/tau_caching_pipeline.py b/cities/modeling/tau_caching_pipeline.py
index b517d522..29e3a51b 100644
--- a/cities/modeling/tau_caching_pipeline.py
+++ b/cities/modeling/tau_caching_pipeline.py
@@ -42,8 +42,8 @@ num_files = len(files)
 
 logging.info(
-    f"{(num_files-2)} sample dictionaries already exist. "
-    f"Starting to obtain {N_combinations_samples - (num_files -2)}"
+    f"{(num_files - 2)} sample dictionaries already exist. "
+    f"Starting to obtain {N_combinations_samples - (num_files - 2)}"
     f" out of {N_combinations_samples} sample dictionaries needed."
 )
 remaining = N_combinations_samples - (num_files - 2)
@@ -84,5 +84,5 @@
 logging.info(
     f"All samples are now available."
-    f"Sampling took {session_ends - session_start:.2f} seconds, or {(session_ends - session_start)/60:.2f} minutes."
+    f"Sampling took {session_ends - session_start:.2f} seconds, or {(session_ends - session_start) / 60:.2f} minutes."
 )
diff --git a/cities/modeling/training_pipeline.py b/cities/modeling/training_pipeline.py
index 3f4ebc72..f69e861a 100644
--- a/cities/modeling/training_pipeline.py
+++ b/cities/modeling/training_pipeline.py
@@ -42,8 +42,8 @@
 
 logging.info(
-    f"{(num_files-2)/2} guides already exist. "
-    f"Starting to train {N_combinations - (num_files -2)/2} out of {N_combinations} guides needed."
+    f"{(num_files - 2) / 2} guides already exist. "
+    f"Starting to train {N_combinations - (num_files - 2) / 2} out of {N_combinations} guides needed."
 )
 remaining = N_combinations - (num_files - 2) / 2
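The (num_files - 2) / 2 arithmetic in both progress messages reconstructs progress from a directory listing. Here is a hedged sketch of the training pipeline's variant, assuming the output directory holds two bookkeeping files plus two files per trained guide; the directory name and the total below are hypothetical placeholders, not taken from this diff:

import logging
import os

logging.basicConfig(level=logging.INFO)

guides_dir = "data/model_guides"  # hypothetical output directory
N_combinations = 120  # hypothetical number of guides needed in total

files = os.listdir(guides_dir)
num_files = len(files)

# assuming two non-guide files live in the directory and each trained guide
# is stored as two files, this recovers the number of finished guides
guides_done = (num_files - 2) / 2
logging.info(
    f"{guides_done} guides already exist. "
    f"Starting to train {N_combinations - guides_done} out of {N_combinations} guides needed."
)
remaining = N_combinations - guides_done

The tau-caching pipeline stores one sample dictionary per file, hence its counterpart above drops the division and uses num_files - 2 directly.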
diff --git a/scripts/clean.sh b/scripts/clean.sh
index fe727a37..6918545f 100755
--- a/scripts/clean.sh
+++ b/scripts/clean.sh
@@ -1,13 +1,22 @@
 #!/bin/bash
 set -euxo pipefail
 
-# isort suspended till the CI-vs-local issue is resolved
-# isort cities/ tests/
+# isort suspended as conflicting with black
+# nbqa isort docs/guides/
+
+
+# this sometimes conflicts with black but does some
+# preliminary import sorting
+# and is then overridden by black
+isort cities/ tests/
+
+black ./cities/ ./tests/ ./docs/guides/
+
+black docs/guides/
 
-black cities/ tests/
 autoflake --remove-all-unused-imports --in-place --recursive ./cities ./tests
-nbqa autoflake --remove-all-unused-imports --recursive --in-place docs/guides/
-# nbqa isort docs/guides/
-nbqa black docs/guides/
+nbqa autoflake --nbqa-shell --remove-all-unused-imports --recursive --in-place docs/guides/
+
+#nbqa black docs/guides/
diff --git a/scripts/lint.sh b/scripts/lint.sh
index 538aeeb1..5e5b9abe 100755
--- a/scripts/lint.sh
+++ b/scripts/lint.sh
@@ -3,10 +3,10 @@ set -euxo pipefail
 
 mypy --ignore-missing-imports cities/
 #isort --check --diff cities/ tests/
-black --check cities/ tests/
+black --check cities/ tests/ docs/guides/
 flake8 cities/ tests/ --ignore=E203,W503 --max-line-length=127
-nbqa autoflake -v --recursive --check docs/guides/
+nbqa autoflake --nbqa-shell -v --recursive --check docs/guides/
 #nbqa isort --check docs/guides/
-nbqa black --check docs/guides/
+
diff --git a/scripts/test_notebooks.sh b/scripts/test_notebooks.sh
index b31a8820..f5defc99 100755
--- a/scripts/test_notebooks.sh
+++ b/scripts/test_notebooks.sh
@@ -1,5 +1,5 @@
 #!/bin/bash
-INCLUDED_NOTEBOOKS="docs/guides/ docs/testing_notebooks/"
+INCLUDED_NOTEBOOKS="docs/guides/ "  # docs/testing_notebooks/ excluded; revert when the pyro-ppl 1.9 bug is fixed
 
 CI=1 pytest -v --nbval-lax --dist loadscope -n auto $INCLUDED_NOTEBOOKS