Merge pull request #142 from BasisResearch/jialu-training-pipeline
simplify the selection of years in the training_pipeline
rfl-urbaniak authored Nov 14, 2024
2 parents db66c26 + 8824f7f commit 21f59c6
Showing 7 changed files with 55 additions and 54 deletions.
13 changes: 13 additions & 0 deletions .gitignore
@@ -23,3 +23,16 @@ tests/.coverage
 .vscode/launch.json
 data/sql/counties_database.db
 data/sql/msa_database.db
+.Rproj.user
+**/*.RData
+**/*.Rhistory
+
+# data
+data/minneapolis/processed/values_long.csv
+data/minneapolis/processed/values_with_parking.csv
+data/minneapolis/sourced/demographic/**
+data/minneapolis/preds/**
+data/minneapolis/sourced/parcel_to_census_tract_mappings/**
+data/minneapolis/sourced/parcel_to_parking_info_mappings/**
+
+data/minneapolis/.pgpass
57 changes: 18 additions & 39 deletions cities/modeling/modeling_utils.py
@@ -68,8 +68,8 @@ def prep_wide_data_for_inference(
     4. Loads the required transformed features.
     5. Merges fixed covariates into a joint dataframe based on a common ID column.
     6. Ensures that the GeoFIPS (geographical identifier) is consistent across datasets.
-    7. Extracts common years for which both intervention and outcome data are available.
-    8. Shifts the outcome variable forward by the specified number of time steps.
+    7. Shifts the outcome variable forward by the number of time steps specified by forward_shift.
+    8. Extracts common years for which both intervention and outcome data are available.
     9. Prepares tensors for input features (x), interventions (t), and outcomes (y).
     10. Creates indices for states and units, preparing them as tensors.
     11. Validates the shapes of the tensors.
@@ -124,50 +124,25 @@ def prep_wide_data_for_inference(
 
     assert f_covariates_joint["GeoFIPS"].equals(intervention["GeoFIPS"])
 
-    # extract data for which intervention and outcome overlap
-    year_min = max(
-        intervention.columns[2:].astype(int).min(),
-        outcome.columns[2:].astype(int).min(),
-    )
-
-    year_max = min(
-        intervention.columns[2:].astype(int).max(),
-        outcome.columns[2:].astype(int).max(),
-    )
-
     assert all(intervention["GeoFIPS"] == outcome["GeoFIPS"])
 
-    # This is for the downstream variable
-    outcome_years_to_keep = [
-        year
-        for year in outcome.columns[2:]
-        if year_min <= int(year) <= year_max + forward_shift
-        if str(int(year) - forward_shift) in intervention.columns[2:]
-    ]
-
-    outcome_years_to_keep = [
-        year for year in outcome_years_to_keep if year in intervention.columns[2:]
-    ]
-
-    outcome = outcome[outcome_years_to_keep]
-
-    # shift outcome `forward_shift` steps ahead
-    # for the prediction task
-    outcome_shifted = outcome.copy()
-
-    for i in range(len(outcome_years_to_keep) - forward_shift):
-        outcome_shifted.iloc[:, i] = outcome_shifted.iloc[:, i + forward_shift]
-
-    years_to_drop = [
-        f"{year}" for year in range(year_max - forward_shift + 1, year_max + 1)
-    ]
-    outcome_shifted.drop(columns=years_to_drop, inplace=True)
-
+    # extract data for which intervention and outcome overlap
     outcome.drop(columns=["GeoFIPS", "GeoName"], inplace=True)
     intervention.drop(columns=["GeoFIPS", "GeoName"], inplace=True)
-    intervention = intervention[outcome_shifted.columns]
+    outcome_shifted = outcome.rename(lambda x: str(int(x) - forward_shift), axis=1)
+    years_available = [
+        year for year in intervention.columns if year in outcome_shifted.columns
+    ]
+    intervention = intervention[years_available]
+    outcome_shifted = outcome_shifted[years_available]
 
     assert intervention.shape == outcome_shifted.shape
 
-    years_available = outcome_shifted.columns.astype(int).values
-
     unit_index = pd.factorize(f_covariates_joint["GeoFIPS"].values)[0]
     state_index = pd.factorize(f_covariates_joint["GeoFIPS"].values // 1000)[0]
 
@@ -197,12 +172,13 @@ def prep_wide_data_for_inference(
 
     model_args = (N_t, N_cov, N_s, N_u, state_index, unit_index)
 
+    int_year_available = [int(year) for year in years_available]
     return {
         "model_args": model_args,
         "x": x,
         "t": t,
         "y": y,
-        "years_available": years_available,
+        "years_available": int_year_available,
         "outcome_years": outcome_years_to_keep,
         "covariates_df": f_covariates_joint,
     }
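
The effect of the simplified year selection is easiest to see on toy data. The sketch below is illustrative only (the frames and values are invented, and the GeoFIPS/GeoName columns are assumed to be already dropped), but it reproduces the new rename-and-intersect logic, including the final conversion of the year labels to integers:

```python
import pandas as pd

forward_shift = 2

# Hypothetical wide frames: one row per unit, one column per year.
intervention = pd.DataFrame({str(y): [0.1] for y in range(2001, 2006)})
outcome = pd.DataFrame({str(y): [float(y)] for y in range(2001, 2006)})

# Relabel outcome year t as t - forward_shift, so that column "2001" of
# outcome_shifted holds the outcome observed in 2003.
outcome_shifted = outcome.rename(lambda x: str(int(x) - forward_shift), axis=1)

# Keep only the intervention years for which a shifted outcome exists.
years_available = [
    year for year in intervention.columns if year in outcome_shifted.columns
]
intervention = intervention[years_available]
outcome_shifted = outcome_shifted[years_available]

assert intervention.shape == outcome_shifted.shape
print([int(year) for year in years_available])  # [2001, 2002, 2003]
```

One rename plus a column intersection replaces the old min/max bookkeeping, the two filtering list comprehensions, and the explicit shifting loop.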
@@ -222,7 +198,10 @@ def train_interactions_model(
     guide = AutoNormal(conditioned_model)
 
     svi = SVI(
-        model=conditioned_model, guide=guide, optim=Adam({"lr": lr}), loss=Trace_ELBO()
+        model=conditioned_model,
+        guide=guide,
+        optim=Adam({"lr": lr}),  # type: ignore
+        loss=Trace_ELBO(),
     )
 
     losses = []
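
For readers less familiar with the Pyro pieces assembled in train_interactions_model, here is a minimal, self-contained sketch of the same SVI pattern. The toy model, data, and hyperparameters are invented stand-ins for the repository's conditioned interactions model, not its actual implementation:

```python
import pyro
import pyro.distributions as dist
import torch
from pyro.infer import SVI, Trace_ELBO
from pyro.infer.autoguide import AutoNormal
from pyro.optim import Adam

def model(x, y=None):
    # Toy linear model standing in for the conditioned interactions model.
    weight = pyro.sample("weight", dist.Normal(0.0, 1.0))
    with pyro.plate("data", x.shape[0]):
        pyro.sample("obs", dist.Normal(weight * x, 1.0), obs=y)

x = torch.randn(100)
y = 2.0 * x + 0.1 * torch.randn(100)

guide = AutoNormal(model)  # mean-field normal guide, as in the diff
svi = SVI(
    model=model,
    guide=guide,
    optim=Adam({"lr": 0.01}),
    loss=Trace_ELBO(),
)

losses = []
for step in range(1000):
    losses.append(svi.step(x, y))  # one gradient update on the ELBO
```

Splitting the SVI(...) call across lines is purely cosmetic; the # type: ignore was presumably added to quiet mypy, which scripts/lint.sh runs over cities/.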
6 changes: 3 additions & 3 deletions cities/modeling/tau_caching_pipeline.py
@@ -42,8 +42,8 @@
 num_files = len(files)
 
 logging.info(
-    f"{(num_files-2)} sample dictionaries already exist. "
-    f"Starting to obtain {N_combinations_samples - (num_files -2)}"
+    f"{(num_files - 2)} sample dictionaries already exist. "
+    f"Starting to obtain {N_combinations_samples - (num_files - 2)}"
     f" out of {N_combinations_samples} sample dictionaries needed."
 )
 remaining = N_combinations_samples - (num_files - 2)
@@ -84,5 +84,5 @@
 
 logging.info(
     f"All samples are now available."
-    f"Sampling took {session_ends - session_start:.2f} seconds, or {(session_ends - session_start)/60:.2f} minutes."
+    f"Sampling took {session_ends - session_start:.2f} seconds, or {(session_ends - session_start) / 60:.2f} minutes."
 )
4 changes: 2 additions & 2 deletions cities/modeling/training_pipeline.py
@@ -42,8 +42,8 @@
 
 
 logging.info(
-    f"{(num_files-2)/2} guides already exist. "
-    f"Starting to train {N_combinations - (num_files -2)/2} out of {N_combinations} guides needed."
+    f"{(num_files - 2) / 2} guides already exist. "
+    f"Starting to train {N_combinations - (num_files - 2) / 2} out of {N_combinations} guides needed."
 )
 
 remaining = N_combinations - (num_files - 2) / 2
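
Both logging changes above are whitespace-only, but the counting pattern they format is worth spelling out. The sketch below is an illustration, not the pipelines' actual code, and it rests on two assumptions the diff does not state: that the output directory contains two bookkeeping files (hence num_files - 2) and that each trained guide is saved as a pair of files (hence the division by 2):

```python
import logging
import os

logging.basicConfig(level=logging.INFO)

guides_dir = "data/model_guides"  # hypothetical output directory
N_combinations = 100              # hypothetical number of guides needed

num_files = len(os.listdir(guides_dir))

# Assumption: two non-guide bookkeeping files live in the directory, and
# each trained guide occupies two files (e.g. guide plus parameters).
already_trained = (num_files - 2) / 2
remaining = N_combinations - already_trained

logging.info(
    f"{already_trained} guides already exist. "
    f"Starting to train {remaining} out of {N_combinations} guides needed."
)
```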
21 changes: 15 additions & 6 deletions scripts/clean.sh
@@ -1,13 +1,22 @@
 #!/bin/bash
 set -euxo pipefail
 
-# isort suspended till the CI-vs-local issue is resolved
-# isort cities/ tests/
+# isort suspended as conflicting with black
+# nbqa isort docs/guides/
+
+
+# this sometimes conflicts with black but does some
+# preliminary import sorting
+# and is then overriden by black
+isort cities/ tests/
 
-black ./cities/ ./tests/ ./docs/guides/
+black docs/guides/
+
+black cities/ tests/
 autoflake --remove-all-unused-imports --in-place --recursive ./cities ./tests
 
-nbqa autoflake --remove-all-unused-imports --recursive --in-place docs/guides/
-# nbqa isort docs/guides/
-nbqa black docs/guides/
+nbqa autoflake --nbqa-shell --remove-all-unused-imports --recursive --in-place docs/guides/
+
+#nbqa black docs/guides/
+
6 changes: 3 additions & 3 deletions scripts/lint.sh
@@ -3,10 +3,10 @@ set -euxo pipefail
 
 mypy --ignore-missing-imports cities/
 #isort --check --diff cities/ tests/
-black --check cities/ tests/
+black --check cities/ tests/ docs/guides/
 flake8 cities/ tests/ --ignore=E203,W503 --max-line-length=127
 
 
-nbqa autoflake -v --recursive --check docs/guides/
+nbqa autoflake --nbqa-shell -v --recursive --check docs/guides/
 #nbqa isort --check docs/guides/
 nbqa black --check docs/guides/

2 changes: 1 addition & 1 deletion scripts/test_notebooks.sh
@@ -1,5 +1,5 @@
 #!/bin/bash
 
-INCLUDED_NOTEBOOKS="docs/guides/ docs/testing_notebooks/"
+INCLUDED_NOTEBOOKS="docs/guides/ " # docs/testing_notebooks/" will revert when the pyro-ppl 1.9 bug is fixed
 
 CI=1 pytest -v --nbval-lax --dist loadscope -n auto $INCLUDED_NOTEBOOKS
