WIP: now using a dictionary for alternative names
evalott100 committed Jun 7, 2024
1 parent bcb2c2d commit 0de3dc1
Showing 3 changed files with 53 additions and 30 deletions.
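In short, the per-record dataset name (a flat mapping) becomes a per-record, per-capture-type mapping. A minimal sketch of the before/after shapes, with illustrative names rather than ones taken from the repository:

from typing import Dict

# Before: one HDF dataset name per capture record.
dataset_names: Dict[str, str] = {"COUNTER1": "ENERGY"}

# After: one dataset name per (record, capture type) pair, so a record
# captured as "Min Max Mean" can emit three distinctly named datasets.
capture_record_hdf_names: Dict[str, Dict[str, str]] = {
    "COUNTER1": {"Min": "ENERGY-Min", "Max": "ENERGY-Max", "Mean": "ENERGY-Mean"},
}

(The real code keys the outer dictionary by EpicsName; plain str is used here only to keep the sketch self-contained.)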
24 changes: 12 additions & 12 deletions src/pandablocks_ioc/_hdf_ioc.py
@@ -3,6 +3,7 @@
 import os
 from asyncio import CancelledError
 from collections import deque
+from copy import deepcopy
 from enum import Enum
 from importlib.util import find_spec
 from pathlib import Path
@@ -70,7 +71,7 @@ def __init__(
         status_message_setter: Callable,
         number_received_setter: Callable,
         number_captured_setter_pipeline: NumCapturedSetter,
-        dataset_names: Dict[EpicsName, str],
+        capture_record_hdf_names: Dict[EpicsName, Dict[str, str]],
     ):
         # Only one filename - user must stop capture and set new FileName/FilePath
         # for new files
@@ -95,7 +96,7 @@ def __init__(
         self.number_captured_setter_pipeline = number_captured_setter_pipeline
         self.number_captured_setter_pipeline.number_captured_setter(0)

-        self.dataset_names = dataset_names
+        self.capture_record_hdf_names = capture_record_hdf_names

         if (
             self.capture_mode == CaptureMode.LAST_N
@@ -118,7 +119,7 @@ def put_data_to_file(self, data: HDFReceived):
     def start_pipeline(self):
         self.pipeline = create_default_pipeline(
             iter([self.filepath]),
-            self.dataset_names,
+            self.capture_record_hdf_names,
             self.number_captured_setter_pipeline,
         )

@@ -333,15 +334,15 @@ class HDF5RecordController:
     def __init__(
         self,
         client: AsyncioClient,
-        dataset_name_getters: Dict[str, Callable[[], str]],
+        capture_record_hdf_name: Dict[str, Callable[[str], str]],
         record_prefix: str,
     ):
         if find_spec("h5py") is None:
             logging.warning("No HDF5 support detected - skipping creating HDF5 records")
             return

         self._client = client
-        self.dataset_name_getters = dataset_name_getters
+        self.capture_record_hdf_name = capture_record_hdf_name

         path_length = os.pathconf("/", "PC_PATH_MAX")
         filename_length = os.pathconf("/", "PC_NAME_MAX")
@@ -626,10 +627,10 @@ async def _update_directory_path(self, new_val) -> None:
             self._directory_exists_record.set(0)

         if self._directory_exists_record.get() == 0:
-            sevr = alarm.MAJOR_ALARM, alrm = alarm.STATE_ALARM
+            sevr, alrm = alarm.MAJOR_ALARM, alarm.STATE_ALARM
             logging.error(status_msg)
         else:
-            sevr = alarm.NO_ALARM, alrm = alarm.NO_ALARM
+            sevr, alrm = alarm.NO_ALARM, alarm.NO_ALARM
             logging.debug(status_msg)

         self._status_message_record.set(status_msg, severity=sevr, alarm=alrm)
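The two replaced lines above are not just a style change: `sevr = alarm.MAJOR_ALARM, alrm = alarm.STATE_ALARM` is a SyntaxError in Python, because an assignment cannot appear inside a tuple expression, whereas the new line is a single tuple-unpacking assignment. A standalone illustration, using placeholder strings instead of the softioc alarm constants:

# sevr = "MAJOR", alrm = "STATE"  # SyntaxError: invalid syntax
sevr, alrm = "MAJOR", "STATE"     # tuple unpacking sets both names
assert (sevr, alrm) == ("MAJOR", "STATE")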
@@ -658,18 +659,17 @@ async def _handle_hdf5_data(self) -> None:
             number_captured_setter_pipeline = NumCapturedSetter(
                 self._num_captured_record.set
             )
+
+            # Get the dataset names, or use the record name if no
+            # dataset name is provided
             buffer = HDF5Buffer(
                 capture_mode,
                 filepath,
                 num_capture,
                 self._status_message_record.set,
                 self._num_received_record.set,
                 number_captured_setter_pipeline,
-                {
-                    record_name: dataset_name_getter() or record_name
-                    for record_name, dataset_name_getter in
-                    self.dataset_name_getters.items()
-                },
+                deepcopy(self.capture_record_hdf_name),
             )
             flush_period: float = self._flush_period_record.get()
             async for data in self._client.data(
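Note the deepcopy on the new argument: the buffer now receives mutable nested dictionaries rather than immutable strings, and copying at capture start plausibly shields an in-progress capture from later edits to the :DATASET records. A small sketch of the difference, with hypothetical names:

from copy import deepcopy

names = {"COUNTER1": {"Value": "ENERGY"}}
alias = names                # shares the inner dicts
snapshot = deepcopy(names)   # fully independent copy

names["COUNTER1"]["Value"] = "RENAMED_MID_CAPTURE"
assert alias["COUNTER1"]["Value"] == "RENAMED_MID_CAPTURE"
assert snapshot["COUNTER1"]["Value"] == "ENERGY"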
57 changes: 40 additions & 17 deletions src/pandablocks_ioc/ioc.py
@@ -537,9 +537,9 @@ def __init__(
         self._client = client
         self._all_values_dict = all_values_dict

-        # We require a dictionary of pos out record name to the getter
-        # for the corresponding dataset name
-        self._pos_out_dataset_name_getters: Dict[EpicsName, Callable[[], str]] = {}
+        # A dictionary of record name to capture type to HDF dataset name
+        # e.g. {"COUNTER1": {"Max": "SOME_OTHER_DATASET_NAME"}}
+        self._capture_record_hdf_names: Dict[EpicsName, Dict[str, str]] = {}

         # Set the record prefix
         builder.SetDeviceName(self._record_prefix)
@@ -867,9 +867,35 @@ def _make_pos_out(
         )

         capture_record_name = EpicsName(record_name + ":CAPTURE")
+        dataset_record_name = EpicsName(record_name + ":DATASET")
         labels, capture_index = self._process_labels(
             field_info.capture_labels, values[capture_record_name]
         )

+        def adjust_hdf_field_name_based_on_dataset(dataset_name) -> None:
+            current_capture_mode = labels[record_dict[capture_record_name].record.get()]
+
+            # Throw away all the old settings
+            self._capture_record_hdf_names[record_name] = {}
+
+            # Append -Min, -Max or -Mean to the dataset name where needed
+            if current_capture_mode in ("Min Max", "Min Max Mean"):
+                for capture_mode_instance in current_capture_mode.split(" "):
+                    self._capture_record_hdf_names[record_name][
+                        capture_mode_instance
+                    ] = f"{(dataset_name or record_name)}-{capture_mode_instance}"
+
+            # Otherwise the capture mode itself is the only instance
+            else:
+                self._capture_record_hdf_names[record_name][current_capture_mode] = (
+                    dataset_name or f"{record_name}-{current_capture_mode}"
+                )
+
+        def adjust_hdf_field_name_based_on_capture_mode(capture_mode) -> None:
+            current_dataset_name = record_dict[dataset_record_name].record.get()
+            adjust_hdf_field_name_based_on_dataset(current_dataset_name)
+
+        print("RECORD NAME", record_name)
         record_dict[capture_record_name] = self._create_record_info(
             capture_record_name,
             "Capture options",
@@ -878,6 +904,16 @@ def _make_pos_out(
             PviGroup.CAPTURE,
             labels=labels,
             initial_value=capture_index,
+            on_update=adjust_hdf_field_name_based_on_capture_mode,
         )
+        record_dict[dataset_record_name] = self._create_record_info(
+            dataset_record_name,
+            "Used to adjust the dataset name to one more scientifically relevant",
+            builder.stringOut,
+            str,
+            PviGroup.CAPTURE,
+            initial_value="",
+            on_update=adjust_hdf_field_name_based_on_dataset,
+        )

         offset_record_name = EpicsName(record_name + ":OFFSET")
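Tracing the new on_update hooks by hand: each :CAPTURE or :DATASET change rebuilds the record's entry in _capture_record_hdf_names. A pure-function restatement of that logic, for illustration only (hdf_names is a hypothetical helper, not part of the commit):

from typing import Dict

def hdf_names(record: str, dataset: str, mode: str) -> Dict[str, str]:
    # Mirrors adjust_hdf_field_name_based_on_dataset above.
    if mode in ("Min Max", "Min Max Mean"):
        return {m: f"{dataset or record}-{m}" for m in mode.split(" ")}
    # Note the asymmetry: a non-empty dataset name gets no suffix here.
    return {mode: dataset or f"{record}-{mode}"}

assert hdf_names("COUNTER1", "ENERGY", "Min Max Mean") == {
    "Min": "ENERGY-Min",
    "Max": "ENERGY-Max",
    "Mean": "ENERGY-Mean",
}
assert hdf_names("COUNTER1", "", "Value") == {"Value": "COUNTER1-Value"}
assert hdf_names("COUNTER1", "ENERGY", "Value") == {"Value": "ENERGY"}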
@@ -921,19 +957,6 @@ def _make_pos_out(
             DESC="Value with scaling applied",
         )

-        dataset_record_name = EpicsName(record_name + ":DATASET")
-        record_dict[dataset_record_name] = self._create_record_info(
-            dataset_record_name,
-            "Used to adjust the dataset name to one more scientifically relevant",
-            builder.stringOut,
-            str,
-            PviGroup.CAPTURE,
-            initial_value="",
-        )
-        self._pos_out_dataset_name_getters[record_name] = record_dict[
-            dataset_record_name
-        ].record.get
-
         # Create the POSITIONS "table" of records. Most are aliases of the records
         # created above.
         positions_record_name = f"POSITIONS:{self._pos_out_row_counter}"
@@ -1784,7 +1807,7 @@ def create_block_records(
         add_pcap_arm_pvi_info(PviGroup.INPUTS, pcap_arm_record)

         HDF5RecordController(
-            self._client, self._pos_out_dataset_name_getters, self._record_prefix
+            self._client, self._capture_record_hdf_names, self._record_prefix
         )

         return record_dict
2 changes: 1 addition & 1 deletion tests/test_hdf_ioc.py
@@ -628,7 +628,7 @@ async def test_hdf5_file_writing_last_n_endreason_not_ok(

     # Initially Status should be "Dir exists and is writable"
     val = await caget(hdf5_test_prefix + ":Status", datatype=DBR_CHAR_STR)
-    assert val == "OK"
+    assert val == "Dir exists and is writable"

     await caput(hdf5_test_prefix + ":Capture", 1, wait=True, timeout=TIMEOUT)