WIP: add a test that dataset names are correct
evalott100 committed Jun 10, 2024
1 parent ea83a6a commit 84a6134
Showing 2 changed files with 114 additions and 1 deletion.
2 changes: 2 additions & 0 deletions src/pandablocks/hdf.py
@@ -108,6 +108,7 @@ def create_dataset(self, field: FieldCapture, raw: bool):
dataset_name = self.capture_record_hdf_names.get(field.name, {}).get(
field.capture, f"{field.name}.{field.capture}"
)

return self.hdf_file.create_dataset(
f"/{dataset_name}",
dtype=dtype,
@@ -124,6 +125,7 @@ def open_file(self, data: StartData):
)
raise
self.hdf_file = h5py.File(self.file_path, "w", libver="latest")
print(data.process)
raw = data.process == "Raw"
self.datasets = [self.create_dataset(field, raw) for field in data.fields]
self.hdf_file.swmr_mode = True
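The lookup in create_dataset above resolves each dataset name from the optional capture_record_hdf_names mapping, falling back to "<field name>.<capture>" when no alternative name is configured. A minimal sketch of that behaviour follows; the standalone resolve_dataset_name helper is illustrative only and not part of pandablocks.hdf:

from typing import Dict

def resolve_dataset_name(
    capture_record_hdf_names: Dict[str, Dict[str, str]],
    field_name: str,
    capture: str,
) -> str:
    # Fall back to "<field>.<capture>" when no alternative name is given,
    # mirroring the .get(...).get(...) chain in create_dataset above.
    return capture_record_hdf_names.get(field_name, {}).get(
        capture, f"{field_name}.{capture}"
    )

# With the mapping used in one of the parametrized cases of the new test,
# COUNTER1.OUT/Value is renamed while COUNTER2.OUT/Min keeps its default name.
names = {"COUNTER1.OUT": {"Value": "scientific-name"}}
assert resolve_dataset_name(names, "COUNTER1.OUT", "Value") == "scientific-name"
assert resolve_dataset_name(names, "COUNTER2.OUT", "Min") == "COUNTER2.OUT.Min"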
113 changes: 112 additions & 1 deletion tests/test_hdf.py
@@ -2,8 +2,9 @@
from pathlib import Path

import numpy as np
import pytest

-from pandablocks.hdf import Pipeline, create_default_pipeline, stop_pipeline
+from pandablocks.hdf import HDFWriter, Pipeline, create_default_pipeline, stop_pipeline
from pandablocks.responses import EndData, EndReason, FieldCapture, FrameData, StartData


@@ -58,3 +59,113 @@ def __init__(self):
assert num_written_queue.get() == NUMBER_OF_FRAMES_WRITTEN
finally:
stop_pipeline(pipeline)


@pytest.mark.parametrize(
"capture_record_hdf_names,expected_names",
[
(
{},
{
"/COUNTER1.OUT.Value",
"/COUNTER2.OUT.Mean",
"/COUNTER2.OUT.Max",
"/COUNTER2.OUT.Min",
},
),
(
{"COUNTER1.OUT": {"Value": "scientific-name"}},
{
"/scientific-name",
"/COUNTER2.OUT.Mean",
"/COUNTER2.OUT.Max",
"/COUNTER2.OUT.Min",
},
),
(
{
"COUNTER2.OUT": {
"Mean": "scientific-name",
"Max": "scientific-name-max",
"Min": "scientific-name-min",
}
},
{
"/COUNTER1.OUT.Value",
"/scientific-name",
"/scientific-name-max",
"/scientific-name-min",
},
),
(
{
"COUNTER1.OUT": {"Value": "scientific-name1"},
"COUNTER2.OUT": {
"Mean": "scientific-name2",
"Max": "scientific-name2-max",
"Min": "scientific-name2-min",
},
},
{
"/scientific-name1",
"/scientific-name2",
"/scientific-name2-max",
"/scientific-name2-min",
},
),
],
)
def test_hdf_writer_uses_alternative_dataset_names(
tmp_path, capture_record_hdf_names, expected_names
):
hdf_writer = HDFWriter(
iter([str(tmp_path / "test_file.h5")]), capture_record_hdf_names
)

start_data = StartData(
fields=[
FieldCapture(
name="COUNTER1.OUT",
type=np.dtype("float64"),
capture="Value",
scale=1,
offset=0,
units="",
),
FieldCapture(
name="COUNTER2.OUT",
type=np.dtype("float64"),
capture="Min",
scale=1,
offset=0,
units="",
),
FieldCapture(
name="COUNTER2.OUT",
type=np.dtype("float64"),
capture="Max",
scale=1,
offset=0,
units="",
),
FieldCapture(
name="COUNTER2.OUT",
type=np.dtype("float64"),
capture="Mean",
scale=1,
offset=0,
units="",
),
],
missed=0,
process="Scaled",
format="Framed",
sample_bytes=52,
arm_time="2024-03-05T20:27:12.607841574Z",
start_time="2024-03-05T20:27:12.608875498Z",
hw_time_offset_ns=100555,
)

hdf_writer.open_file(start_data)

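    # h5py's Dataset.name is the absolute path within the file, hence the
    # leading "/" in expected_names.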
assert {dataset.name for dataset in hdf_writer.datasets} == expected_names
