Skip to content

Commit

Permalink
Merge pull request #823 from dirac-institute/black_MJ_pR
Browse files Browse the repository at this point in the history
Run black on the files that the linter checks are failing on
  • Loading branch information
mschwamb authored Feb 16, 2024
2 parents 2624c12 + 65cdb63 commit 610f085
Show file tree
Hide file tree
Showing 6 changed files with 33 additions and 24 deletions.
13 changes: 5 additions & 8 deletions src/sorcha/ephemeris/simulation_driver.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,8 @@ class EphemerisGeometryParameters:


def get_vec(row, vecname):
    """Return the 3-vector (x, y, z) named *vecname* from a table row.

    Parameters
    ----------
    row : mapping or pandas Series
        Must contain the keys ``f"{vecname}_x"``, ``f"{vecname}_y"``,
        and ``f"{vecname}_z"``.
    vecname : str
        Base name of the vector columns (e.g. ``"r_obs"``).

    Returns
    -------
    numpy.ndarray
        Array of the three components in (x, y, z) order.
    """
    # NOTE: the diff residue duplicated this return (pre- and post-black
    # formatting); a single statement is the intended code.
    return np.asarray([row[f"{vecname}_x"], row[f"{vecname}_y"], row[f"{vecname}_z"]])


def create_ephemeris(orbits_df, pointings_df, args, configs):
"""Generate a set of observations given a collection of orbits
Expand Down Expand Up @@ -173,7 +174,7 @@ def create_ephemeris(orbits_df, pointings_df, args, configs):
# the majority of the computation to build out `pixel_dict`.
desigs = set()
pixId = pointings_df.attrs["pixels"]
for pix in pixId[pointing["pixels_begin"]:pointing["pixels_end"]]:
for pix in pixId[pointing["pixels_begin"] : pointing["pixels_end"]]:
desigs.update(pixel_dict[pix])

for obj_id in sorted(desigs):
Expand All @@ -193,14 +194,10 @@ def create_ephemeris(orbits_df, pointings_df, args, configs):
_,
ephem_geom_params.r_ast,
ephem_geom_params.v_ast,
) = integrate_light_time(
sim, ex, pointing["JD_TDB"] - ephem.jd_ref, r_obs, lt0=0.01
)
) = integrate_light_time(sim, ex, pointing["JD_TDB"] - ephem.jd_ref, r_obs, lt0=0.01)
ephem_geom_params.rho_hat = ephem_geom_params.rho / ephem_geom_params.rho_mag

ang_from_center = (
180 / np.pi * np.arccos(np.dot(ephem_geom_params.rho_hat, visit_vector))
)
ang_from_center = 180 / np.pi * np.arccos(np.dot(ephem_geom_params.rho_hat, visit_vector))
if ang_from_center < ang_fov:
out_tuple = calculate_rates_and_geometry(pointing, ephem_geom_params)
in_memory_csv.writerow(out_tuple)
Expand Down
6 changes: 4 additions & 2 deletions src/sorcha/ephemeris/simulation_setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -210,15 +210,17 @@ def precompute_pointing_information(pointings_df, args, configs):
# use pandas `apply` again because healpy.query_disc is not easily vectorizable
pixels, i = [], 0
pixBeginEnd = np.empty((len(pointings_df), 2), dtype=np.int32)
for k, (fieldRa, fieldDec) in enumerate(zip(pointings_df["fieldRA"].values, pointings_df["fieldDec"].values)):
for k, (fieldRa, fieldDec) in enumerate(
zip(pointings_df["fieldRA"].values, pointings_df["fieldDec"].values)
):
fieldPix = partial_get_hp_neighbors(fieldRa, fieldDec)
nPix = len(fieldPix)
pixels.append(fieldPix)
pixBeginEnd[k] = i, i + nPix
i += nPix
pointings_df["pixels_begin"], pointings_df["pixels_end"] = pixBeginEnd[:, 0], pixBeginEnd[:, 1]
pixels = np.concatenate(pixels, dtype=np.int32)
pointings_df.attrs = { "pixels": pixels }
pointings_df.attrs = {"pixels": pixels}

# create empty arrays for observatory position and velocity to be filled in
r_obs = np.empty((len(pointings_df), 3))
Expand Down
2 changes: 1 addition & 1 deletion src/sorcha/modules/PPMatchPointingToObservations.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@ def PPMatchPointingToObservations(padain, pointfildb):
# they don't need to be included in the result df, so exclude them from the merge.
pointing_columns_to_skip = ["JD_TDB", "pixels_begin", "pixels_end"]
for name in ["visit_vector", "pixels", "r_obs", "v_obs", "r_sun", "v_sun"]:
pointing_columns_to_skip += [ f"{name}_x", f"{name}_y", f"{name}_z" ]
pointing_columns_to_skip += [f"{name}_x", f"{name}_y", f"{name}_z"]

resdf = pd.merge(
padain,
Expand Down
2 changes: 1 addition & 1 deletion src/sorcha/modules/PPReadPointingDatabase.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ def PPReadPointingDatabase(bsdbname, observing_filters, dbquery, surveyname):
df["observationId_"] = df["observationId"]
df = df.rename(columns={"observationId": "FieldID"})
df = df.rename(columns={"filter": "optFilter"}) # not to confuse with the pandas filter command
df["optFilter"] = df["optFilter"].astype("category") # save memory
df["optFilter"] = df["optFilter"].astype("category") # save memory
dfo = df[df.optFilter.isin(observing_filters)].copy()

# at the moment the RubinSim pointing databases don't record the observation
Expand Down
18 changes: 10 additions & 8 deletions src/sorcha/sorcha.py
Original file line number Diff line number Diff line change
Expand Up @@ -57,12 +57,14 @@ def cite():
"""
cite_sorcha()


def mem(df):
    """Return the total memory footprint of *df* in bytes.

    Sums the deep per-column memory usage of the DataFrame and adds the
    ``nbytes`` of every array stored in ``df.attrs`` (side-channel data
    such as the precomputed HEALPix pixel list).

    Parameters
    ----------
    df : pandas.DataFrame
        Frame whose ``attrs`` values each expose an ``nbytes`` attribute.

    Returns
    -------
    int
        Total bytes used by the frame plus its attrs payloads.
    """
    total = df.memory_usage(deep=True).sum()
    total += sum(extra.nbytes for extra in df.attrs.values())
    return total


def runLSSTSimulation(args, configs):
"""
Runs the post processing survey simulator functions that apply a series of
Expand Down Expand Up @@ -117,17 +119,17 @@ def runLSSTSimulation(args, configs):
filterpointing = PPReadPointingDatabase(
args.pointing_database, configs["observing_filters"], configs["pointing_sql_query"], args.surveyname
)
## print("POSTREAD:", len(filterpointing), type(filterpointing), mem(filterpointing))
## print(filterpointing.dtypes)
## print("POSTREAD:", len(filterpointing), type(filterpointing), mem(filterpointing))
## print(filterpointing.dtypes)

# if we are going to compute the ephemerides, then we should pre-compute all
# of the needed values derived from the pointing information.
if configs["ephemerides_type"].casefold() != "external":
verboselog("Pre-computing pointing information for ephemeris generation")
filterpointing = precompute_pointing_information(filterpointing, args, configs)
## print("POSTPOINTING:", len(filterpointing), type(filterpointing), mem(filterpointing))
## print(filterpointing.dtypes)
## print("POSTPOINTING:", len(filterpointing), type(filterpointing), filterpointing.memory_usage(deep=True))
## print("POSTPOINTING:", len(filterpointing), type(filterpointing), mem(filterpointing))
## print(filterpointing.dtypes)
## print("POSTPOINTING:", len(filterpointing), type(filterpointing), filterpointing.memory_usage(deep=True))

# Set up the data readers.
ephem_type = configs["ephemerides_type"]
Expand All @@ -150,9 +152,9 @@ def runLSSTSimulation(args, configs):
if configs["comet_activity"] is not None or configs["lc_model"] is not None:
reader.add_aux_data_reader(CSVDataReader(args.complex_parameters, configs["aux_format"]))

# import time
# time.sleep(100)
# exit()
# import time
# time.sleep(100)
# exit()

# In case of a large input file, the data is read in chunks. The
# "sizeSerialChunk" parameter in PPConfig.ini assigns the chunk.
Expand Down
16 changes: 12 additions & 4 deletions tests/ephemeris/test_simulation_driver.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,10 +31,18 @@ def test_calculate_rates_and_geometry():
"visit_vector_z": -0.399435561333849,
"JD_TDB": 2460219.484998981,
"pixels": None,
"r_obs_x": 0.9825025212987633, "r_obs_y": 0.12894773431445178, "r_obs_z": 0.056115072741286603,
"v_obs_x": -0.002514846222194574, "v_obs_y": 0.015645226468919866, "v_obs_z": 0.006740310710189443,
"r_sun_x": -0.008375571318557293, "r_sun_y": -0.0021278397223137443, "r_sun_z": -0.0006896179222345509,
"v_sun_x": 4.014508061373484e-06,"v_sun_y": -7.199434717117629e-06, "v_sun_z": -3.1502131721138966e-06,
"r_obs_x": 0.9825025212987633,
"r_obs_y": 0.12894773431445178,
"r_obs_z": 0.056115072741286603,
"v_obs_x": -0.002514846222194574,
"v_obs_y": 0.015645226468919866,
"v_obs_z": 0.006740310710189443,
"r_sun_x": -0.008375571318557293,
"r_sun_y": -0.0021278397223137443,
"r_sun_z": -0.0006896179222345509,
"v_sun_x": 4.014508061373484e-06,
"v_sun_y": -7.199434717117629e-06,
"v_sun_z": -3.1502131721138966e-06,
}
)

Expand Down

0 comments on commit 610f085

Please sign in to comment.