Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

simplify the selection of years in the training_pipeline #142

Merged
merged 11 commits into from
Nov 14, 2024
13 changes: 13 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -23,3 +23,16 @@ tests/.coverage
.vscode/launch.json
data/sql/counties_database.db
data/sql/msa_database.db
.Rproj.user
**/*.RData
**/*.Rhistory

# data
data/minneapolis/processed/values_long.csv
data/minneapolis/processed/values_with_parking.csv
data/minneapolis/sourced/demographic/**
data/minneapolis/preds/**
data/minneapolis/sourced/parcel_to_census_tract_mappings/**
data/minneapolis/sourced/parcel_to_parking_info_mappings/**

data/minneapolis/.pgpass
56 changes: 14 additions & 42 deletions cities/modeling/modeling_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -68,8 +68,8 @@ def prep_wide_data_for_inference(
4. Loads the required transformed features.
5. Merges fixed covariates into a joint dataframe based on a common ID column.
6. Ensures that the GeoFIPS (geographical identifier) is consistent across datasets.
7. Extracts common years for which both intervention and outcome data are available.
8. Shifts the outcome variable forward by the specified number of time steps.
7. Shifts the outcome variable forward by the number of time steps given by forward_shift.
8. Extracts common years for which both intervention and outcome data are available.
9. Prepares tensors for input features (x), interventions (t), and outcomes (y).
10. Creates indices for states and units, preparing them as tensors.
11. Validates the shapes of the tensors.
Expand Down Expand Up @@ -125,49 +125,17 @@ def prep_wide_data_for_inference(
assert f_covariates_joint["GeoFIPS"].equals(intervention["GeoFIPS"])

# extract data for which intervention and outcome overlap
year_min = max(
intervention.columns[2:].astype(int).min(),
outcome.columns[2:].astype(int).min(),
)

year_max = min(
intervention.columns[2:].astype(int).max(),
outcome.columns[2:].astype(int).max(),
)

assert all(intervention["GeoFIPS"] == outcome["GeoFIPS"])

outcome_years_to_keep = [
year
for year in outcome.columns[2:]
if year_min <= int(year) <= year_max + forward_shift
]

outcome_years_to_keep = [
year for year in outcome_years_to_keep if year in intervention.columns[2:]
]

Copy link
Contributor Author

@JialuJialu JialuJialu Aug 12, 2024

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Lines 106 to 114 work the same as
outcome_years_to_keep = [year for year in outcome.columns[2:] if year in intervention.columns[2:]]
It seems that the intent was to write
outcome_years_to_keep = [year for year in outcome.columns[2:] if year - forward_shift in intervention.columns[2:]]

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Alright, I think the issue was dropping a variable that was needed elsewhere.

outcome = outcome[outcome_years_to_keep]

# shift outcome `forward_shift` steps ahead
# for the prediction task
outcome_shifted = outcome.copy()

for i in range(len(outcome_years_to_keep) - forward_shift):
outcome_shifted.iloc[:, i] = outcome_shifted.iloc[:, i + forward_shift]

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The shifting could be completed in one step through renaming.

years_to_drop = [
f"{year}" for year in range(year_max - forward_shift + 1, year_max + 1)
]
outcome_shifted.drop(columns=years_to_drop, inplace=True)

outcome.drop(columns=["GeoFIPS", "GeoName"], inplace=True)
intervention.drop(columns=["GeoFIPS", "GeoName"], inplace=True)
intervention = intervention[outcome_shifted.columns]
outcome_shifted = outcome.rename(lambda x: str(int(x) - forward_shift), axis=1)
years_available = [
year for year in intervention.columns if year in outcome_shifted.columns
]
intervention = intervention[years_available]
outcome_shifted = outcome_shifted[years_available]

assert intervention.shape == outcome_shifted.shape

years_available = outcome_shifted.columns.astype(int).values

unit_index = pd.factorize(f_covariates_joint["GeoFIPS"].values)[0]
state_index = pd.factorize(f_covariates_joint["GeoFIPS"].values // 1000)[0]

Expand Down Expand Up @@ -197,6 +165,7 @@ def prep_wide_data_for_inference(

model_args = (N_t, N_cov, N_s, N_u, state_index, unit_index)

int_year_available = [int(year) for year in years_available]
return {
"model_args": model_args,
"x": x,
Expand All @@ -222,7 +191,10 @@ def train_interactions_model(
guide = AutoNormal(conditioned_model)

svi = SVI(
model=conditioned_model, guide=guide, optim=Adam({"lr": lr}), loss=Trace_ELBO()
model=conditioned_model,
guide=guide,
optim=ClippedAdam({"lr": lr}),
loss=Trace_ELBO(),
)

losses = []
Expand Down
6 changes: 3 additions & 3 deletions cities/modeling/tau_caching_pipeline.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,8 +42,8 @@
num_files = len(files)

logging.info(
f"{(num_files-2)} sample dictionaries already exist. "
f"Starting to obtain {N_combinations_samples - (num_files -2)}"
f"{(num_files - 2)} sample dictionaries already exist. "
f"Starting to obtain {N_combinations_samples - (num_files - 2)}"
f" out of {N_combinations_samples} sample dictionaries needed."
)
remaining = N_combinations_samples - (num_files - 2)
Expand Down Expand Up @@ -84,5 +84,5 @@

logging.info(
f"All samples are now available."
f"Sampling took {session_ends - session_start:.2f} seconds, or {(session_ends - session_start)/60:.2f} minutes."
f"Sampling took {session_ends - session_start:.2f} seconds, or {(session_ends - session_start) / 60:.2f} minutes."
)
4 changes: 2 additions & 2 deletions cities/modeling/training_pipeline.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,8 +42,8 @@


logging.info(
f"{(num_files-2)/2} guides already exist. "
f"Starting to train {N_combinations - (num_files -2)/2} out of {N_combinations} guides needed."
f"{(num_files - 2) / 2} guides already exist. "
f"Starting to train {N_combinations - (num_files - 2) / 2} out of {N_combinations} guides needed."
)

remaining = N_combinations - (num_files - 2) / 2
Expand Down
Loading