
Commit c0b84f1
Merge branch 'develop' into epw_checks
nweires committed Mar 26, 2024
2 parents 5a83a58 + 59a307d
Showing 7 changed files with 1,585 additions and 2 deletions.
53 changes: 53 additions & 0 deletions buildstockbatch/cloud/docker_base.py
@@ -20,6 +20,7 @@
import logging
import math
import os
import pandas as pd
import pathlib
import random
import shutil
@@ -120,6 +121,9 @@ def __init__(self, project_filename):
            logger.error("The docker server did not respond, make sure Docker Desktop is started then retry.")
            raise RuntimeError("The docker server did not respond, make sure Docker Desktop is started then retry.")

    def get_fs(self):
        return LocalFileSystem()

    @staticmethod
    def validate_project(project_file):
        super(DockerBatchBase, DockerBatchBase).validate_project(project_file)
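
The new get_fs() hook gives log_summary (below) a single place to ask for the filesystem that holds the results. A cloud subclass would presumably override it to return a remote fsspec filesystem instead of the local one; a minimal hypothetical sketch (class name and S3 backend assumed, not part of this commit):

from s3fs import S3FileSystem


class AwsDockerBatch(DockerBatchBase):
    def get_fs(self):
        # Hypothetical override: results live in S3 rather than on local disk.
        return S3FileSystem()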
@@ -539,3 +543,52 @@ def run_simulations(cls, cfg, job_id, jobs_d, sim_dir, fs, output_path):
                shutil.rmtree(item)
            elif os.path.isfile(item):
                os.remove(item)

    def log_summary(self):
        """
        Log a summary of how many simulations succeeded, failed, or ended with other statuses.
        Uses the `completed_status` column of the files in results/parquet/.../results_up*.parquet.
        """
        fs = self.get_fs()
        # Summary of simulation statuses across all upgrades
        status_summary = {}
        total_counts = collections.defaultdict(int)

        results_glob = f"{self.results_dir}/parquet/**/results_up*.parquet"
        try:
            results_files = fs.glob(results_glob)
        except FileNotFoundError:
            logger.info(f"No results parquet files found at {results_glob}")
            return

        for result in results_files:
            upgrade_id = result.split(".")[0][-2:]
            with fs.open(result) as f:
                df = pd.read_parquet(f, columns=["completed_status"])
            # Dict mapping from status (e.g. "Success") to count
            statuses = df.groupby("completed_status").size().to_dict()
            status_summary[upgrade_id] = statuses
            for status, count in statuses.items():
                total_counts[status] += count

        # Always include these statuses and show them first
        always_use = ["Success", "Fail"]
        all_statuses = always_use + list(total_counts.keys() - set(always_use))
        s = "Final status of all simulations:"
        for upgrade, counts in status_summary.items():
            if upgrade == "00":
                s += "\nBaseline "
            else:
                s += f"\nUpgrade {upgrade} "
            for status in all_statuses:
                s += f"{status}: {counts.get(status, 0):<7d} "

        s += "\n\nTotal "
        for status in all_statuses:
            s += f"{status}: {total_counts.get(status, 0):<7d} "
        s += "\n"

        for upgrade in postprocessing.get_upgrade_list(self.cfg):
            if f"{upgrade:02d}" not in status_summary:
                s += f"\nNo results found for Upgrade {upgrade}"
        logger.info(s)
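
For context, with the fixture data used by the new test below (a baseline and one upgrade, four successful simulations each), the summary logged by this method comes out roughly like this; the counts are from the test fixtures and the column padding comes from the :<7d format spec:

Final status of all simulations:
Baseline Success: 4       Fail: 0
Upgrade 01 Success: 4       Fail: 0

Total Success: 8       Fail: 0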
14 changes: 13 additions & 1 deletion buildstockbatch/postprocessing.py
@@ -112,9 +112,21 @@ def read_out_osw(fs, filename):
    keys_to_copy = ["started_at", "completed_at", "completed_status"]
    for key in keys_to_copy:
        out_d[key] = d.get(key, None)

    step_errors = []
    for step in d.get("steps", []):
-        if step["measure_dir_name"] == "BuildExistingModel":
+        measure_dir_name = step["measure_dir_name"]
+        if measure_dir_name == "BuildExistingModel":
            out_d["building_id"] = step["arguments"]["building_id"]

        # Collect error messages from any failed steps.
        if result := step.get("result"):
            if result.get("step_result", "Success") != "Success":
                step_errors.append({"measure_dir_name": measure_dir_name, "step_errors": result.get("step_errors")})

    if step_errors:
        out_d["step_failures"] = step_errors

    return out_d
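
As a hedged illustration of the new step_failures field (measure name and error text hypothetical), a parsed out.osw with one failed step would be annotated like this:

d = {
    "completed_status": "Fail",
    "steps": [
        {
            "measure_dir_name": "ApplyUpgrade",
            "result": {"step_result": "Fail", "step_errors": ["upgrade could not be applied"]},
        }
    ],
}

# After the loop above, out_d would include:
# out_d["step_failures"] == [
#     {"measure_dir_name": "ApplyUpgrade", "step_errors": ["upgrade could not be applied"]}
# ]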


26 changes: 26 additions & 0 deletions buildstockbatch/test/test_docker_base.py
@@ -3,6 +3,7 @@
from fsspec.implementations.local import LocalFileSystem
import gzip
import json
import logging
import os
import pathlib
import shutil
@@ -160,3 +161,28 @@ def test_run_simulations(basic_residential_project_file):
    # Check that files were cleaned up correctly
    assert not os.listdir(sim_dir)
    os.chdir(old_cwd)


def test_log_summary(basic_residential_project_file, mocker, caplog):
    """
    Test logging a summary of simulation statuses.
    """
    project_filename, results_dir = basic_residential_project_file()

    mocker.patch.object(DockerBatchBase, "results_dir", results_dir)
    dbb = DockerBatchBase(project_filename)
    # Add results parquet files
    shutil.copytree(
        os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "test_results",
            "parquet",
        ),
        os.path.join(results_dir, "parquet"),
    )

    with caplog.at_level(logging.INFO):
        dbb.log_summary()
    assert "Upgrade 01 Success: 4       Fail: 0" in caplog.text
    assert "Baseline Success: 4       Fail: 0" in caplog.text
    assert "Total Success: 8       Fail: 0" in caplog.text
2 changes: 1 addition & 1 deletion buildstockbatch/test/test_hpc.py
@@ -331,7 +331,7 @@ def make_sim_dir_mock(building_id, upgrade_idx, base_dir, overwrite_existing=Fal
    # check results job-json
    refrence_path = pathlib.Path(__file__).resolve().parent / "test_results" / "reference_files"

-    refrence_list = json.loads(gzip.open(refrence_path / "results_job1.json.gz", "r").read())
+    refrence_list = json.loads(open(refrence_path / "results_job1.json", "r").read())

    output_list = json.loads(gzip.open(results_dir / "simulation_output" / "results_job1.json.gz", "r").read())
