diff --git a/src/pandablocks/hdf.py b/src/pandablocks/hdf.py index 8008478f..73eb80f2 100644 --- a/src/pandablocks/hdf.py +++ b/src/pandablocks/hdf.py @@ -108,6 +108,7 @@ def create_dataset(self, field: FieldCapture, raw: bool): dataset_name = self.capture_record_hdf_names.get(field.name, {}).get( field.capture, f"{field.name}.{field.capture}" ) + return self.hdf_file.create_dataset( f"/{dataset_name}", dtype=dtype, diff --git a/tests/test_hdf.py b/tests/test_hdf.py index 5ff7a2af..290491ac 100644 --- a/tests/test_hdf.py +++ b/tests/test_hdf.py @@ -2,8 +2,9 @@ from pathlib import Path import numpy as np +import pytest -from pandablocks.hdf import Pipeline, create_default_pipeline, stop_pipeline +from pandablocks.hdf import HDFWriter, Pipeline, create_default_pipeline, stop_pipeline from pandablocks.responses import EndData, EndReason, FieldCapture, FrameData, StartData @@ -58,3 +59,113 @@ def __init__(self): assert num_written_queue.get() == NUMBER_OF_FRAMES_WRITTEN finally: stop_pipeline(pipeline) + + +@pytest.mark.parametrize( + "capture_record_hdf_names,expected_names", + [ + ( + {}, + { + "/COUNTER1.OUT.Value", + "/COUNTER2.OUT.Mean", + "/COUNTER2.OUT.Max", + "/COUNTER2.OUT.Min", + }, + ), + ( + {"COUNTER1.OUT": {"Value": "scientific-name"}}, + { + "/scientific-name", + "/COUNTER2.OUT.Mean", + "/COUNTER2.OUT.Max", + "/COUNTER2.OUT.Min", + }, + ), + ( + { + "COUNTER2.OUT": { + "Mean": "scientific-name", + "Max": "scientific-name-max", + "Min": "scientific-name-min", + } + }, + { + "/COUNTER1.OUT.Value", + "/scientific-name", + "/scientific-name-max", + "/scientific-name-min", + }, + ), + ( + { + "COUNTER1.OUT": {"Value": "scientific-name1"}, + "COUNTER2.OUT": { + "Mean": "scientific-name2", + "Max": "scientific-name2-max", + "Min": "scientific-name2-min", + }, + }, + { + "/scientific-name1", + "/scientific-name2", + "/scientific-name2-max", + "/scientific-name2-min", + }, + ), + ], +) +def test_hdf_writer_uses_alternative_dataset_names( + tmp_path, 
+    capture_record_hdf_names, expected_names