diff --git a/pyproject.toml b/pyproject.toml
index 9bda366..6fc31e5 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -13,7 +13,7 @@ classifiers = [
 description = "Create an IOC from a PandA"
 dependencies = [
     "setuptools>=64",
-    "numpy",
+    "numpy<2",  # until https://github.com/mdavidsaver/p4p/issues/145 is fixed
     "click",
     "h5py",
     "softioc>=4.4.0",
diff --git a/src/pandablocks_ioc/_hdf_ioc.py b/src/pandablocks_ioc/_hdf_ioc.py
index 84851d4..34810fe 100644
--- a/src/pandablocks_ioc/_hdf_ioc.py
+++ b/src/pandablocks_ioc/_hdf_ioc.py
@@ -24,14 +24,14 @@
 from ._pvi import PviGroup, add_automatic_pvi_info, add_data_capture_pvi_info
 from ._tables import ReadOnlyPvaTable
-from ._types import ONAM_STR, ZNAM_STR, EpicsName
+from ._types import ONAM_STR, ZNAM_STR, EpicsName, epics_to_panda_name

 HDFReceived = Union[ReadyData, StartData, FrameData, EndData]


 class CaptureMode(Enum):
     """
-    The mode which the circular buffer will use to flush
+    The mode which the circular buffer will use to flush.
     """

     #: Wait till N frames are recieved then write them
@@ -313,13 +313,20 @@ def handle_data(self, data: HDFReceived):

 @dataclass
 class Dataset:
+    """A dataset name and capture mode."""
+
     name: str
     capture: str


 class DatasetNameCache:
-    def __init__(self, datasets: Dict[str, Dataset], datasets_record_name: EpicsName):
-        self.datasets = datasets
+    """Used for outputting formatted dataset names in the HDF5 writer, and
+    creating and updating the HDF5 `DATASETS` table record."""
+
+    def __init__(
+        self, datasets: Dict[EpicsName, Dataset], datasets_record_name: EpicsName
+    ):
+        self._datasets = datasets

         self._datasets_table_record = ReadOnlyPvaTable(
             datasets_record_name, ["Name", "Type"]
@@ -332,11 +339,11 @@ def hdf_writer_names(self):
         """Formats the current dataset names for use in the HDFWriter"""

         hdf_names: Dict[str, Dict[str, str]] = {}
-        for record_name, dataset in self.datasets.items():
+        for record_name, dataset in self._datasets.items():
             if not dataset.name or dataset.capture == "No":
                 continue

-            field_name = record_name.replace(":", ".")
+            field_name = epics_to_panda_name(record_name)

             hdf_names[field_name] = hdf_name = {}

@@ -350,7 +357,7 @@ def update_datasets_record(self):
         dataset_name_list = [
             dataset.name
-            for dataset in self.datasets.values()
+            for dataset in self._datasets.values()
             if dataset.name and dataset.capture != "No"
         ]
         self._datasets_table_record.update_row("Name", dataset_name_list)
@@ -384,7 +391,7 @@ class HDF5RecordController:
     def __init__(
         self,
         client: AsyncioClient,
-        dataset_cache: Dict[str, Dataset],
+        dataset_cache: Dict[EpicsName, Dataset],
         record_prefix: str,
     ):
         if find_spec("h5py") is None:
diff --git a/src/pandablocks_ioc/_pvi.py b/src/pandablocks_ioc/_pvi.py
index 22237f9..2849b24 100644
--- a/src/pandablocks_ioc/_pvi.py
+++ b/src/pandablocks_ioc/_pvi.py
@@ -216,7 +216,7 @@ def add_positions_table_row(
             name=epics_to_pvi_name(capture_record_name),
             label=capture_record_name,
             pv=capture_record_name,
-            widget=TextWrite(),
+            widget=ComboBox(),
         ),
     ]
diff --git a/src/pandablocks_ioc/_tables.py b/src/pandablocks_ioc/_tables.py
index 451f9f3..7a0c659 100644
--- a/src/pandablocks_ioc/_tables.py
+++ b/src/pandablocks_ioc/_tables.py
@@ -106,7 +106,7 @@ def __init__(
             "Q:group",
             {
                 RecordName(f"{block}:PVI"): {
-                    f"pvi.{field.lower().replace(':', '_')}.rw": {
+                    f"pvi.{field.lower().replace(':', '_')}.r": {
                         "+channel": "VAL",
                         "+type": "plain",
                     }
diff --git a/src/pandablocks_ioc/ioc.py b/src/pandablocks_ioc/ioc.py
index 603106a..f67d238 100644
--- a/src/pandablocks_ioc/ioc.py
+++ b/src/pandablocks_ioc/ioc.py
@@ -543,7 +543,9 @@ def __init__(
         # All records should be blocking
         builder.SetBlocking(True)

-        self._dataset_cache: Dict[str, Dataset] = {}
+        # A dataset cache for storing dataset names and capture modes for different
+        # capture records
+        self._dataset_cache: Dict[EpicsName, Dataset] = {}

     def _process_labels(
         self, labels: List[str], record_value: ScalarRecordValue
@@ -890,6 +892,9 @@ def capture_record_on_update(new_capture_mode):
             on_update=capture_record_on_update,
         )

+        # For now we have to make a `_RecordUpdater` here and
+        # combine it with `on_update`.
+        # https://github.com/PandABlocks/PandABlocks-ioc/issues/121
         capture_record_updater = _RecordUpdater(
             record_dict[capture_record_name],
             self._record_prefix,
@@ -1071,6 +1076,9 @@ def capture_record_on_update(new_capture_mode):
             initial_value=capture_index,
             on_update=capture_record_on_update,
         )
+        # For now we have to make a `_RecordUpdater` here and
+        # combine it with `on_update`.
+        # https://github.com/PandABlocks/PandABlocks-ioc/issues/121
         capture_record_updater = _RecordUpdater(
             record_dict[capture_record_name],
             self._record_prefix,
diff --git a/tests/test_hdf_ioc.py b/tests/test_hdf_ioc.py
index 58bb3a7..100e15a 100644
--- a/tests/test_hdf_ioc.py
+++ b/tests/test_hdf_ioc.py
@@ -39,10 +39,12 @@
 from pandablocks_ioc._hdf_ioc import (
     CaptureMode,
     Dataset,
+    DatasetNameCache,
     HDF5Buffer,
     HDF5RecordController,
     NumCapturedSetter,
 )
+from pandablocks_ioc._types import EpicsName

 NAMESPACE_PREFIX = "HDF-RECORD-PREFIX"
@@ -230,7 +232,7 @@ async def hdf5_controller(
     test_prefix, hdf5_test_prefix = new_random_hdf5_prefix

     dataset_name_cache = {
-        "COUNTER1:OUT": Dataset("some_other_dataset_name", "Value"),
+        EpicsName("COUNTER1:OUT"): Dataset("some_other_dataset_name", "Value"),
     }

     hdf5_controller = HDF5RecordController(
@@ -1308,3 +1310,41 @@ def test_hdf_capture_validate_exception(
     )

     assert hdf5_controller._capture_validate(None, 1) is False
+
+
+def test_dataset_name_cache():
+    with patch(
+        "pandablocks_ioc._hdf_ioc.ReadOnlyPvaTable", autospec=True
+    ) as mock_table:
+        mock_table_instance = MagicMock()
+        mock_table.return_value = mock_table_instance
+
+        # Initialize DatasetNameCache
+        datasets = {
+            "TEST1:OUT": Dataset("", "Value"),
+            "TEST2:OUT": Dataset("test2", "No"),
+            "TEST3:OUT": Dataset("test3", "Value"),
+            "TEST4:OUT": Dataset("test4", "Min Max Mean"),
+            "TEST5:OUT": Dataset("test5", "Min Max"),
+        }
+        cache = DatasetNameCache(datasets, "record_name")
+
+        # Check that set_rows was called once with the correct arguments
+        mock_table_instance.set_rows.assert_called_once_with(
+            ["Name", "Type"], [[], []], length=300, default_data_type=str
+        )
+
+        cache.update_datasets_record()
+
+        # Check that update_row was called with the correct arguments
+        mock_table_instance.update_row.assert_any_call(
+            "Name", ["test3", "test4", "test5"]
+        )
+        mock_table_instance.update_row.assert_any_call(
+            "Type", ["float64", "float64", "float64"]
+        )
+
+        assert cache.hdf_writer_names() == {
+            "TEST3.OUT": {"Value": "test3"},
+            "TEST4.OUT": {"Mean": "test4", "Min": "test4-min", "Max": "test4-max"},
+            "TEST5.OUT": {"Min": "test5-min", "Max": "test5-max"},
+        }
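
Reviewer note: the diff above elides the loop inside `hdf_writer_names` that builds the per-capture dataset names, so below is a minimal, self-contained sketch of the mapping it is expected to produce, reconstructed from the expectations in `test_dataset_name_cache`. The inline `epics_to_panda_name` here is a hypothetical stand-in (a plain `:` to `.` replacement) for the real helper in `pandablocks_ioc._types`, and the Min/Max suffix rule is inferred from the test, not copied from the implementation.

from dataclasses import dataclass
from typing import Dict


@dataclass
class Dataset:
    """A dataset name and capture mode (mirrors the class in _hdf_ioc.py)."""

    name: str
    capture: str


def epics_to_panda_name(record_name: str) -> str:
    # Hypothetical stand-in: EPICS record names use ":" where PandA
    # field names use "."
    return record_name.replace(":", ".")


def hdf_writer_names(datasets: Dict[str, Dataset]) -> Dict[str, Dict[str, str]]:
    """Sketch of DatasetNameCache.hdf_writer_names(), per the test expectations."""
    hdf_names: Dict[str, Dict[str, str]] = {}
    for record_name, dataset in datasets.items():
        # Unnamed datasets and datasets whose capture mode is "No" are skipped
        if not dataset.name or dataset.capture == "No":
            continue

        field_name = epics_to_panda_name(record_name)
        hdf_names[field_name] = hdf_name = {}

        # "Value" and "Mean" reuse the plain dataset name; "Min" and "Max"
        # get "-min"/"-max" suffixes so all three can coexist in one file
        for capture in dataset.capture.split(" "):
            if capture in ("Min", "Max"):
                hdf_name[capture] = f"{dataset.name}-{capture.lower()}"
            else:
                hdf_name[capture] = dataset.name
    return hdf_names


if __name__ == "__main__":
    datasets = {
        "TEST3:OUT": Dataset("test3", "Value"),
        "TEST4:OUT": Dataset("test4", "Min Max Mean"),
    }
    assert hdf_writer_names(datasets) == {
        "TEST3.OUT": {"Value": "test3"},
        "TEST4.OUT": {"Min": "test4-min", "Max": "test4-max", "Mean": "test4"},
    }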