diff --git a/bindings/python/src/pipeline/node/CameraBindings.cpp b/bindings/python/src/pipeline/node/CameraBindings.cpp
index d0d6554eb..759179561 100644
--- a/bindings/python/src/pipeline/node/CameraBindings.cpp
+++ b/bindings/python/src/pipeline/node/CameraBindings.cpp
@@ -3,6 +3,7 @@
 #include "depthai/pipeline/Node.hpp"
 #include "depthai/pipeline/Pipeline.hpp"
 #include "depthai/pipeline/node/Camera.hpp"
+#include "depthai/pipeline/node/host/Replay.hpp"

 void bind_camera(pybind11::module& m, void* pCallstack) {
     using namespace dai;
@@ -21,9 +22,15 @@ void bind_camera(pybind11::module& m, void* pCallstack) {
     // Actual bindings
     camera.def_readonly("inputControl", &Camera::inputControl, DOC(dai, node, Camera, inputControl))
         .def_readonly("initialControl", &Camera::initialControl, DOC(dai, node, Camera, initialControl))
-        .def("build", &Camera::build, "boardSocket"_a = CameraBoardSocket::AUTO, DOC(dai, node, Camera, build))
+        .def_readonly("mockIsp", &Camera::mockIsp, DOC(dai, node, Camera, mockIsp))
+        .def("build", py::overload_cast<CameraBoardSocket>(&Camera::build), "boardSocket"_a = CameraBoardSocket::AUTO, DOC(dai, node, Camera, build))
+#ifdef DEPTHAI_HAVE_OPENCV_SUPPORT
+        .def("build", py::overload_cast<CameraBoardSocket, ReplayVideo&>(&Camera::build), py::arg("boardSocket"), py::arg("replayNode"), DOC(dai, node, Camera, build))
+        .def("build", py::overload_cast<ReplayVideo&>(&Camera::build), py::arg("replayNode"), DOC(dai, node, Camera, build))
+#endif
         // .def("setBoardSocket", &Camera::setBoardSocket, "boardSocket"_a, DOC(dai, node, Camera, setBoardSocket))
         .def("getBoardSocket", &Camera::getBoardSocket, DOC(dai, node, Camera, getBoardSocket))
+        .def("setMockIsp", &Camera::setMockIsp, "mockIsp"_a, DOC(dai, node, Camera, setMockIsp))
         // .def("setCamera", &Camera::setCamera, "name"_a, DOC(dai, node, Camera, setCamera))
         // .def("getCamera", &Camera::getCamera, DOC(dai, node, Camera, getCamera))
         .def("requestOutput",
diff --git a/bindings/python/src/pipeline/node/IMUBindings.cpp b/bindings/python/src/pipeline/node/IMUBindings.cpp
index c4655bb8c..087e6fa0b 100644
--- a/bindings/python/src/pipeline/node/IMUBindings.cpp
+++ b/bindings/python/src/pipeline/node/IMUBindings.cpp
@@ -60,7 +60,9 @@ void bind_imu(pybind11::module& m, void* pCallstack) {
         .def_readwrite("enableFirmwareUpdate", &IMUProperties::enableFirmwareUpdate, DOC(dai, IMUProperties, enableFirmwareUpdate));

     // Node
-    imu.def_readonly("out", &IMU::out, DOC(dai, node, IMU, out))
+    imu
+        .def_readonly("out", &IMU::out, DOC(dai, node, IMU, out))
+        .def_readonly("mockIn", &IMU::mockIn, DOC(dai, node, IMU, mockIn))
         .def("enableIMUSensor",
             static_cast<void (IMU::*)(IMUSensorConfig)>(&IMU::enableIMUSensor),
             py::arg("sensorConfig"),
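The two new `build` overloads let a host-side `ReplayVideo` node stand in for the sensor by feeding the camera's `mockIsp` input. A minimal C++ sketch of the intended wiring (the recording path and the requested output are illustrative, not taken from this patch):

#include "depthai/depthai.hpp"
#include "depthai/pipeline/node/host/Replay.hpp"

int main() {
    dai::Pipeline pipeline;

    // Host-side replay node acts as the frame source for the camera
    auto replay = pipeline.create<dai::node::ReplayVideo>();
    replay->setReplayVideoFile("recording.mp4");  // illustrative path
    replay->setOutFrameType(dai::ImgFrame::Type::NV12);

    // build(boardSocket, replayNode) routes the replayed frames into Camera::mockIsp
    auto cam = pipeline.create<dai::node::Camera>()->build(dai::CameraBoardSocket::CAM_A, *replay);

    auto q = cam->requestOutput({1280, 720}, dai::ImgFrame::Type::NV12)->createOutputQueue();
    pipeline.start();
    while(pipeline.isRunning()) {
        auto frame = q->get<dai::ImgFrame>();
        // ... consume frames as if they came from the live sensor
    }
    return 0;
}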
diff --git a/cmake/Depthai/DepthaiDeviceRVC4Config.cmake b/cmake/Depthai/DepthaiDeviceRVC4Config.cmake
index 270400993..757abfc15 100644
--- a/cmake/Depthai/DepthaiDeviceRVC4Config.cmake
+++ b/cmake/Depthai/DepthaiDeviceRVC4Config.cmake
@@ -4,4 +4,4 @@ set(DEPTHAI_DEVICE_RVC4_MATURITY "snapshot")

 # "version if applicable"
 # set(DEPTHAI_DEVICE_RVC4_VERSION "0.0.1+93f7b75a885aa32f44c5e9f53b74470c49d2b1af")
-set(DEPTHAI_DEVICE_RVC4_VERSION "0.0.1+e15e34e96495e1ac3d4c064499d45f33784cba8a")
+set(DEPTHAI_DEVICE_RVC4_VERSION "0.0.1+cb02832972593df1fa893eb81796b84c5b28fd3c")
diff --git a/cmake/Depthai/DepthaiDeviceSideConfig.cmake b/cmake/Depthai/DepthaiDeviceSideConfig.cmake
index 32505dc84..4c8c296c2 100644
--- a/cmake/Depthai/DepthaiDeviceSideConfig.cmake
+++ b/cmake/Depthai/DepthaiDeviceSideConfig.cmake
@@ -2,7 +2,7 @@ set(DEPTHAI_DEVICE_SIDE_MATURITY "snapshot")

 # "full commit hash of device side binary"
-set(DEPTHAI_DEVICE_SIDE_COMMIT "9af6aa33e593ce207ad00d09e528ce85181d543b")
+set(DEPTHAI_DEVICE_SIDE_COMMIT "bb2e7846c91edf48b2a881caff3e47d5c4764c2a")

 # "version if applicable"
 set(DEPTHAI_DEVICE_SIDE_VERSION "")
diff --git a/examples/cpp/CMakeLists.txt b/examples/cpp/CMakeLists.txt
index 25b442483..ac4a657a7 100644
--- a/examples/cpp/CMakeLists.txt
+++ b/examples/cpp/CMakeLists.txt
@@ -263,15 +263,17 @@ dai_add_example(image_manip_v2_multi_ops ImageManip/image_manip_v2_multi_ops.cpp
 dai_add_example(image_manip_color_conversion ImageManip/image_manip_color_conversion.cpp OFF OFF)

 # Record
-dai_add_example(record_encoded_h264 RVC2/Record/record_encoded_h264.cpp OFF OFF)
-dai_add_example(record_encoded_mjpeg RVC2/Record/record_encoded_mjpeg.cpp OFF OFF)
-dai_add_example(record_raw_color RVC2/Record/record_raw_color.cpp OFF OFF)
-dai_add_example(record_raw_gray RVC2/Record/record_raw_gray.cpp OFF OFF)
-dai_add_example(record_imu RVC2/Record/record_imu.cpp OFF OFF)
+dai_add_example(record_encoded_h264 Record/record_encoded_h264.cpp OFF OFF)
+dai_add_example(record_encoded_mjpeg Record/record_encoded_mjpeg.cpp OFF OFF)
+dai_add_example(record_raw_color Record/record_raw_color.cpp OFF OFF)
+dai_add_example(record_raw_gray Record/record_raw_gray.cpp OFF OFF)
+dai_add_example(record_imu Record/record_imu.cpp OFF OFF)
+dai_add_example(holistic_record Record/holistic_record.cpp OFF OFF)

 # Replay
-dai_add_example(replay_video_meta RVC2/Replay/replay_video_meta.cpp OFF OFF)
-dai_add_example(replay_imu RVC2/Replay/replay_imu.cpp OFF OFF)
+dai_add_example(replay_video_meta Replay/replay_video_meta.cpp OFF OFF)
+dai_add_example(replay_imu Replay/replay_imu.cpp OFF OFF)
+dai_add_example(holistic_replay Replay/holistic_replay.cpp OFF OFF)

 # StereoDepth
 dai_add_example(depth_preview StereoDepth/depth_preview.cpp OFF OFF)
@@ -281,6 +283,7 @@ dai_add_example(thermal RVC2/Thermal/thermal.cpp OFF OFF)

 # Host nodes
 dai_add_example(rgb_video RVC2/ColorCamera/rgb_video.cpp OFF OFF)
+dai_add_example(rgb_video_camera RVC2/Camera/rgb_video_camera.cpp OFF OFF)
 dai_add_example(host_node HostNodes/host_node.cpp OFF OFF)
 dai_add_example(threaded_host_node HostNodes/threaded_host_node.cpp OFF OFF)
diff --git a/examples/cpp/RVC2/Camera/rgb_video_camera.cpp b/examples/cpp/RVC2/Camera/rgb_video_camera.cpp
new file mode 100644
index 000000000..0dc839d6d
--- /dev/null
+++ b/examples/cpp/RVC2/Camera/rgb_video_camera.cpp
@@ -0,0 +1,31 @@
+// Includes common necessary includes for development using depthai library
+#include "depthai/common/CameraBoardSocket.hpp"
+#include "depthai/depthai.hpp"
+#include "depthai/pipeline/datatype/ImgFrame.hpp"
+#include "depthai/pipeline/node/host/Replay.hpp"
+
+int main(int argc, char** argv) {
+    // Create pipeline
+    auto device = argc > 1 ? std::make_shared<dai::Device>(argv[1]) : std::make_shared<dai::Device>();
+    dai::Pipeline pipeline(device);
+
+    // Define source and output
+    auto camRgb = pipeline.create<dai::node::Camera>()->build(dai::CameraBoardSocket::CAM_A);
+
+    auto outputQueue = camRgb->requestOutput({1280, 720}, dai::ImgFrame::Type::NV12)->createOutputQueue();
+
+    pipeline.start();
+    while(pipeline.isRunning()) {
+        auto videoIn = outputQueue->get<dai::ImgFrame>();
+
+        // Get BGR frame from NV12 encoded video frame to show with opencv
+        // Visualizing the frame on slower hosts might have overhead
+        cv::imshow("video", videoIn->getCvFrame());
+
+        int key = cv::waitKey(1);
+        if(key == 'q' || key == 'Q') {
+            pipeline.stop();
+        }
+    }
+    return 0;
+}
diff --git a/examples/cpp/RVC2/Record/holistic_record.cpp b/examples/cpp/Record/holistic_record.cpp
similarity index 53%
rename from examples/cpp/RVC2/Record/holistic_record.cpp
rename to examples/cpp/Record/holistic_record.cpp
index 65817f021..1219f676b 100644
--- a/examples/cpp/RVC2/Record/holistic_record.cpp
+++ b/examples/cpp/Record/holistic_record.cpp
@@ -7,21 +7,20 @@
 #endif

 int main(int argc, char** argv) {
-    dai::Pipeline pipeline(true);
-    auto cam = pipeline.create<dai::node::ColorCamera>();
+    dai::Pipeline pipeline;
+    auto cam = pipeline.create<dai::node::Camera>()->build();
     auto imu = pipeline.create<dai::node::IMU>();
     auto display = pipeline.create<dai::node::Display>();

-    cam->setBoardSocket(dai::CameraBoardSocket::CAM_A);
-    cam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P);
-    cam->setFps(30);
+    auto camOut = cam->requestOutput({720, 1280}, dai::ImgFrame::Type::NV12);

     // enable ACCELEROMETER_RAW at 500 hz rate
     imu->enableIMUSensor(dai::IMUSensor::ACCELEROMETER_RAW, 500);
     // enable GYROSCOPE_RAW at 400 hz rate
     imu->enableIMUSensor(dai::IMUSensor::GYROSCOPE_RAW, 400);
+    imu->setBatchReportThreshold(100);

-    cam->video.link(display->input);
+    camOut->link(display->input);
     auto q = imu->out.createOutputQueue();

     dai::RecordConfig config;
@@ -36,17 +35,21 @@ int main(int argc, char** argv) {

     auto start = std::chrono::steady_clock::now();

-    while(std::chrono::steady_clock::now() - start < std::chrono::seconds(15)) {
-        auto imuData = q->get<dai::IMUData>();
-        auto imuPackets = imuData->packets;
-        for(auto& imuPacket : imuPackets) {
-            auto& acceleroValues = imuPacket.acceleroMeter;
-            auto& gyroValues = imuPacket.gyroscope;
-
-            printf("Accelerometer [m/s^2]: x: %.3f y: %.3f z: %.3f \n", acceleroValues.x, acceleroValues.y, acceleroValues.z);
-            printf("Gyroscope [rad/s]: x: %.3f y: %.3f z: %.3f \n", gyroValues.x, gyroValues.y, gyroValues.z);
+    try {
+        while(std::chrono::steady_clock::now() - start < std::chrono::seconds(15)) {
+            auto imuData = q->get<dai::IMUData>();
+            auto imuPackets = imuData->packets;
+            for(auto& imuPacket : imuPackets) {
+                auto& acceleroValues = imuPacket.acceleroMeter;
+                auto& gyroValues = imuPacket.gyroscope;
+
+                // printf("Accelerometer [m/s^2]: x: %.3f y: %.3f z: %.3f \n", acceleroValues.x, acceleroValues.y, acceleroValues.z);
+                // printf("Gyroscope [rad/s]: x: %.3f y: %.3f z: %.3f \n", gyroValues.x, gyroValues.y, gyroValues.z);
+            }
+            std::this_thread::sleep_for(std::chrono::milliseconds(1));
         }
-        std::this_thread::sleep_for(std::chrono::milliseconds(1));
+    } catch(...) {
+    }

     pipeline.stop();
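The loop above is now wrapped in try/catch because a blocking `get()` throws once the pipeline winds down and closes its queues. A sketch of the same pattern with the exception type spelled out (`dai::MessageQueue::QueueException`, the type the Python replay example below catches):

try {
    while(true) {
        auto imuData = q->get<dai::IMUData>();  // blocks until data arrives or the queue closes
        // ... consume imuData->packets
    }
} catch(const dai::MessageQueue::QueueException&) {
    // Pipeline stopped and closed the queue; leave the consumer loop cleanly
}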
diff --git a/examples/cpp/RVC2/Record/record_encoded_h264.cpp b/examples/cpp/Record/record_encoded_h264.cpp
similarity index 68%
rename from examples/cpp/RVC2/Record/record_encoded_h264.cpp
rename to examples/cpp/Record/record_encoded_h264.cpp
index 5df240585..6411d6abf 100644
--- a/examples/cpp/RVC2/Record/record_encoded_h264.cpp
+++ b/examples/cpp/Record/record_encoded_h264.cpp
@@ -1,5 +1,7 @@
 #include

+#include "depthai/capabilities/ImgFrameCapability.hpp"
+#include "depthai/common/CameraBoardSocket.hpp"
 #include "depthai/pipeline/node/host/Record.hpp"

 #ifndef DEPTHAI_HAVE_OPENCV_SUPPORT
@@ -8,7 +10,7 @@
 int main(int argc, char** argv) {
     dai::Pipeline pipeline(true);
-    auto cam = pipeline.create<dai::node::ColorCamera>();
+    auto cam = pipeline.create<dai::node::Camera>()->build(dai::CameraBoardSocket::CAM_A);
     auto display = pipeline.create<dai::node::Display>();
     auto videoEncoder = pipeline.create<dai::node::VideoEncoder>();
     auto record = pipeline.create<dai::node::RecordVideo>();
@@ -17,14 +19,12 @@
     record->setRecordVideoFile(path + std::string(".mp4"));
     record->setRecordMetadataFile(path + std::string(".mcap"));

-    cam->setBoardSocket(dai::CameraBoardSocket::CAM_A);
-    cam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P);
-    cam->setFps(30);
+    auto* camOut = cam->requestOutput({1920, 1440}, dai::ImgFrame::Type::NV12, dai::ImgResizeMode::CROP, 30.f);

     videoEncoder->setProfile(dai::VideoEncoderProperties::Profile::H264_MAIN);

-    cam->video.link(videoEncoder->input);
-    cam->video.link(display->input);
+    camOut->link(videoEncoder->input);
+    camOut->link(display->input);
     videoEncoder->out.link(record->input);

     pipeline.run();  // Let the display node stop the pipeline
diff --git a/examples/cpp/RVC2/Record/record_encoded_mjpeg.cpp b/examples/cpp/Record/record_encoded_mjpeg.cpp
similarity index 100%
rename from examples/cpp/RVC2/Record/record_encoded_mjpeg.cpp
rename to examples/cpp/Record/record_encoded_mjpeg.cpp
diff --git a/examples/cpp/RVC2/Record/record_imu.cpp b/examples/cpp/Record/record_imu.cpp
similarity index 100%
rename from examples/cpp/RVC2/Record/record_imu.cpp
rename to examples/cpp/Record/record_imu.cpp
diff --git a/examples/cpp/RVC2/Record/record_raw_color.cpp b/examples/cpp/Record/record_raw_color.cpp
similarity index 100%
rename from examples/cpp/RVC2/Record/record_raw_color.cpp
rename to examples/cpp/Record/record_raw_color.cpp
diff --git a/examples/cpp/RVC2/Record/record_raw_gray.cpp b/examples/cpp/Record/record_raw_gray.cpp
similarity index 100%
rename from examples/cpp/RVC2/Record/record_raw_gray.cpp
rename to examples/cpp/Record/record_raw_gray.cpp
diff --git a/examples/cpp/RVC2/Replay/holistic_replay.cpp b/examples/cpp/Replay/holistic_replay.cpp
similarity index 83%
rename from examples/cpp/RVC2/Replay/holistic_replay.cpp
rename to examples/cpp/Replay/holistic_replay.cpp
index e45887303..241668937 100644
--- a/examples/cpp/RVC2/Replay/holistic_replay.cpp
+++ b/examples/cpp/Replay/holistic_replay.cpp
@@ -7,21 +7,19 @@
 #endif

 int main(int argc, char** argv) {
-    dai::Pipeline pipeline(true);
-    auto cam = pipeline.create<dai::node::ColorCamera>();
+    dai::Pipeline pipeline;
+    auto cam = pipeline.create<dai::node::Camera>()->build();
     auto imu = pipeline.create<dai::node::IMU>();
     auto display = pipeline.create<dai::node::Display>();

-    cam->setBoardSocket(dai::CameraBoardSocket::CAM_A);
-    cam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P);
-    cam->setFps(30);
+    auto camOut = cam->requestOutput({720, 1280}, dai::ImgFrame::Type::NV12);

     // enable ACCELEROMETER_RAW at 500 hz rate
     imu->enableIMUSensor(dai::IMUSensor::ACCELEROMETER_RAW, 500);
     // enable GYROSCOPE_RAW at
400 hz rate imu->enableIMUSensor(dai::IMUSensor::GYROSCOPE_RAW, 400); - cam->video.link(display->input); + camOut->link(display->input); auto q = imu->out.createOutputQueue(); pipeline.enableHolisticReplay(argc > 1 ? std::string(argv[1]) : "recording.tar.gz"); diff --git a/examples/cpp/RVC2/Replay/replay_imu.cpp b/examples/cpp/Replay/replay_imu.cpp similarity index 100% rename from examples/cpp/RVC2/Replay/replay_imu.cpp rename to examples/cpp/Replay/replay_imu.cpp diff --git a/examples/cpp/RVC2/Replay/replay_video.cpp b/examples/cpp/Replay/replay_video.cpp similarity index 100% rename from examples/cpp/RVC2/Replay/replay_video.cpp rename to examples/cpp/Replay/replay_video.cpp diff --git a/examples/cpp/RVC2/Replay/replay_video_meta.cpp b/examples/cpp/Replay/replay_video_meta.cpp similarity index 100% rename from examples/cpp/RVC2/Replay/replay_video_meta.cpp rename to examples/cpp/Replay/replay_video_meta.cpp diff --git a/examples/python/RVC2/RecordReplay/.gitignore b/examples/python/RecordReplay/.gitignore similarity index 100% rename from examples/python/RVC2/RecordReplay/.gitignore rename to examples/python/RecordReplay/.gitignore diff --git a/examples/python/RVC2/RecordReplay/holistic_record.py b/examples/python/RecordReplay/holistic_record.py similarity index 82% rename from examples/python/RVC2/RecordReplay/holistic_record.py rename to examples/python/RecordReplay/holistic_record.py index 30993bd97..102782500 100644 --- a/examples/python/RVC2/RecordReplay/holistic_record.py +++ b/examples/python/RecordReplay/holistic_record.py @@ -15,17 +15,13 @@ # Create pipeline with dai.Pipeline(True) as pipeline: # Define source and output - camRgb = pipeline.create(dai.node.ColorCamera) - - # Properties - camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A) - camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P) - camRgb.setVideoSize(1920, 1080) - camRgb.setFps(30) + camRgb = pipeline.create(dai.node.Camera).build(dai.CameraBoardSocket.CAM_A) + camRgbOut = camRgb.requestOutput((1920, 1080), fps = 30) imu = pipeline.create(dai.node.IMU) imu.enableIMUSensor(dai.IMUSensor.ACCELEROMETER_RAW, 500); imu.enableIMUSensor(dai.IMUSensor.GYROSCOPE_RAW, 400); + imu.setBatchReportThreshold(100) config = dai.RecordConfig() config.outputDir = args.output; @@ -35,7 +31,7 @@ pipeline.enableHolisticRecord(config) - videoQueue = camRgb.preview.createOutputQueue() + videoQueue = camRgbOut.createOutputQueue() imuQueue = imu.out.createOutputQueue() # Connect to device and start pipeline diff --git a/examples/python/RVC2/RecordReplay/holistic_replay.py b/examples/python/RecordReplay/holistic_replay.py similarity index 79% rename from examples/python/RVC2/RecordReplay/holistic_replay.py rename to examples/python/RecordReplay/holistic_replay.py index 9f7e94520..3e0420369 100644 --- a/examples/python/RVC2/RecordReplay/holistic_replay.py +++ b/examples/python/RecordReplay/holistic_replay.py @@ -11,21 +11,17 @@ # Create pipeline with dai.Pipeline(True) as pipeline: # Define source and output - camRgb = pipeline.create(dai.node.ColorCamera) - - # Properties - camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A) - camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P) - camRgb.setVideoSize(1920, 1080) - camRgb.setFps(30) + camRgb = pipeline.create(dai.node.Camera).build(dai.CameraBoardSocket.CAM_A) + camRgbOut = camRgb.requestOutput((1920, 1080), fps = 30) imu = pipeline.create(dai.node.IMU) imu.enableIMUSensor(dai.IMUSensor.ACCELEROMETER_RAW, 500); 
 imu.enableIMUSensor(dai.IMUSensor.GYROSCOPE_RAW, 400);
+    imu.setBatchReportThreshold(100)

     pipeline.enableHolisticReplay(args.source)

-    videoQueue = camRgb.preview.createOutputQueue()
+    videoQueue = camRgbOut.createOutputQueue()
     imuQueue = imu.out.createOutputQueue()

     # Connect to device and start pipeline
diff --git a/examples/python/RVC2/RecordReplay/record_video.py b/examples/python/RecordReplay/record_video.py
similarity index 100%
rename from examples/python/RVC2/RecordReplay/record_video.py
rename to examples/python/RecordReplay/record_video.py
diff --git a/examples/python/RVC2/RecordReplay/replay_video.py b/examples/python/RecordReplay/replay_video.py
similarity index 72%
rename from examples/python/RVC2/RecordReplay/replay_video.py
rename to examples/python/RecordReplay/replay_video.py
index 9f6e54c2f..85248b54f 100644
--- a/examples/python/RVC2/RecordReplay/replay_video.py
+++ b/examples/python/RecordReplay/replay_video.py
@@ -5,7 +5,8 @@ import cv2

 parser = argparse.ArgumentParser()
-parser.add_argument("-i", "--inputVideo", default="test_video.mp4", help="Input video name")
+parser.add_argument("-v", "--inputVideo", default="test_video.mp4", help="Input video name")
+parser.add_argument("-m", "--inputMetadata", default="test_video.mcap", help="Input metadata name")

 args = parser.parse_args()

@@ -17,19 +18,17 @@ with dai.Pipeline() as pipeline:
     replay = pipeline.create(dai.node.ReplayVideo)
     replay.setReplayVideoFile(Path(args.inputVideo))
+    if Path(args.inputMetadata).exists():
+        replay.setReplayMetadataFile(Path(args.inputMetadata))
     replay.setOutFrameType(dai.ImgFrame.Type.NV12)
     replay.setLoop(False)

-    imageManip = pipeline.create(dai.node.ImageManip)
-    imageManip.initialConfig.setResize(300, 300)
-    imageManip.initialConfig.setFrameType(dai.ImgFrame.Type.BGR888p)
-    replay.out.link(imageManip.inputImage)
-    manipOutQueue = imageManip.out.createOutputQueue()
+    videoOut = replay.out.createOutputQueue()

     pipeline.start()
     while pipeline.isRunning() and replay.isRunning():
         try:
-            outFrame : dai.ImgFrame = manipOutQueue.get()
+            outFrame : dai.ImgFrame = videoOut.get()
         except dai.MessageQueue.QueueException:
             # Replay stopped the pipeline
             break
diff --git a/include/depthai/common/ImgTransformations.hpp b/include/depthai/common/ImgTransformations.hpp
index 55f37e55e..62ba8bc27 100644
--- a/include/depthai/common/ImgTransformations.hpp
+++ b/include/depthai/common/ImgTransformations.hpp
@@ -26,12 +26,12 @@ struct ImgTransformation {
     size_t width = 0;
     size_t height = 0;

+    std::vector<dai::RotatedRect> srcCrops = {};
+
     dai::RotatedRect srcCrop;
     dai::RotatedRect dstCrop;
     bool cropsValid = false;

-    std::vector<dai::RotatedRect> srcCrops = {};
-
     void calcCrops();

    public:
@@ -151,6 +151,7 @@ struct ImgTransformation {
      * @return Vertical field of view in degrees
      */
     float getVFov(bool source = false) const;
+    std::vector<dai::RotatedRect> getSrcCrops() const;

     /**
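`getSrcCrops()` exposes the crop history that `ImgTransformation` previously kept private. A brief sketch of inspecting it on a received frame, assuming the usual public `transformation` member on `dai::ImgFrame` and the `center`/`size` fields of `dai::RotatedRect`:

auto frame = queue->get<dai::ImgFrame>();
// One dai::RotatedRect per crop applied between the sensor and this output,
// expressed in source-frame coordinates
for(const auto& crop : frame->transformation.getSrcCrops()) {
    std::cout << "crop center " << crop.center.x << "," << crop.center.y
              << " size " << crop.size.width << "x" << crop.size.height << "\n";
}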
diff --git a/include/depthai/pipeline/datatype/EncodedFrame.hpp b/include/depthai/pipeline/datatype/EncodedFrame.hpp
index 0dfb464f3..3a7d1d8ee 100644
--- a/include/depthai/pipeline/datatype/EncodedFrame.hpp
+++ b/include/depthai/pipeline/datatype/EncodedFrame.hpp
@@ -172,13 +172,20 @@ class EncodedFrame : public Buffer, public ProtoSerializable {
      */
     EncodedFrame& setProfile(Profile profile);

+    ImgFrame getImgFrameMeta() const;
+
+    void serialize(std::vector<std::uint8_t>& metadata, DatatypeEnum& datatype) const override {
+        metadata = utility::serialize(*this);
+        datatype = DatatypeEnum::EncodedFrame;
+    };
+
 #ifdef DEPTHAI_ENABLE_PROTOBUF
     /**
      * Serialize message to proto buffer
      *
      * @returns serialized message
      */
-    std::vector<std::uint8_t> serializeProto() const override;
+    std::vector<std::uint8_t> serializeProto(bool metadataOnly = false) const override;

     /**
      * Serialize schema to proto buffer
@@ -204,11 +211,6 @@ class EncodedFrame : public Buffer, public ProtoSerializable {
                        Buffer::sequenceNum,
                        Buffer::ts,
                        Buffer::tsDevice);
-
-    void serialize(std::vector<std::uint8_t>& metadata, DatatypeEnum& datatype) const override {
-        metadata = utility::serialize(*this);
-        datatype = DatatypeEnum::EncodedFrame;
-    };
 };

 }  // namespace dai
diff --git a/include/depthai/pipeline/datatype/IMUData.hpp b/include/depthai/pipeline/datatype/IMUData.hpp
index ea59f44fd..836812823 100644
--- a/include/depthai/pipeline/datatype/IMUData.hpp
+++ b/include/depthai/pipeline/datatype/IMUData.hpp
@@ -5,6 +5,8 @@
 #include

 #include "depthai/pipeline/datatype/Buffer.hpp"
+#include "depthai/utility/ProtoSerializable.hpp"
+
 namespace dai {

 struct IMUReport {
@@ -211,7 +213,7 @@ DEPTHAI_SERIALIZE_EXT(IMUPacket, acceleroMeter, gyroscope, magneticField, rotati
 /**
  * IMUData message. Carries normalized detection results
  */
-class IMUData : public Buffer {
+class IMUData : public Buffer, public ProtoSerializable {
    public:
     // Construct IMUData message
     IMUData() = default;
@@ -224,7 +226,21 @@ class IMUData : public Buffer {
         datatype = DatatypeEnum::IMUData;
     };

-    span<const std::uint8_t> getRecordData() const override;
+#ifdef DEPTHAI_ENABLE_PROTOBUF
+    /**
+     * Serialize message to proto buffer
+     *
+     * @returns serialized message
+     */
+    std::vector<std::uint8_t> serializeProto(bool = false) const override;
+
+    /**
+     * Serialize schema to proto buffer
+     *
+     * @returns serialized schema
+     */
+    ProtoSerializable::SchemaPair serializeSchema() const override;
+#endif

     DEPTHAI_SERIALIZE(IMUData, Buffer::ts, Buffer::tsDevice, Buffer::sequenceNum, packets);
 };
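With `IMUData` now implementing `ProtoSerializable`, IMU streams can be written to MCAP the same way image streams are. A hand-rolled sketch of the two calls involved (assumes a build with `DEPTHAI_ENABLE_PROTOBUF`; `SchemaPair` is treated here as a name/bytes pair):

#ifdef DEPTHAI_ENABLE_PROTOBUF
auto imuData = imuQueue->get<dai::IMUData>();

// The schema describes the IMUData protobuf layout; bytes are one encoded message
auto [schemaName, schemaBytes] = imuData->serializeSchema();
std::vector<std::uint8_t> bytes = imuData->serializeProto();
// ... hand the schema and message bytes to an MCAP writer, as the Record nodes do internally
#endif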
diff --git a/include/depthai/pipeline/datatype/ImageAnnotations.hpp b/include/depthai/pipeline/datatype/ImageAnnotations.hpp
new file mode 100644
index 000000000..6efb8930e
--- /dev/null
+++ b/include/depthai/pipeline/datatype/ImageAnnotations.hpp
@@ -0,0 +1,145 @@
+
+#pragma once
+#include "depthai/common/Color.hpp"
+#include "depthai/common/Point2f.hpp"
+#include "depthai/pipeline/datatype/Buffer.hpp"
+#include "depthai/schemas/ImageAnnotations.pb.h"
+#include "depthai/utility/ProtoSerializable.hpp"
+
+namespace dai {
+struct CircleAnnotation {
+    Point2f position;
+    float diameter;
+    float thickness;
+    Color fillColor;
+    Color outlineColor;
+};
+
+DEPTHAI_SERIALIZE_EXT(CircleAnnotation, position, diameter, thickness, fillColor, outlineColor);
+
+enum class PointsAnnotationType : std::uint8_t { UNKNOWN = 0, POINTS = 1, LINE_LOOP = 2, LINE_STRIP = 3, LINE_LIST = 4 };
+
+struct PointsAnnotation {
+    PointsAnnotationType type;
+    std::vector<Point2f> points;
+    Color outlineColor;
+    std::vector<Color> outlineColors;
+    Color fillColor;
+    float thickness;
+};
+
+DEPTHAI_SERIALIZE_EXT(PointsAnnotation, type, points, outlineColor, outlineColors, fillColor, thickness);
+
+struct TextAnnotation {
+    Point2f position;
+    std::string text;
+    float fontSize;
+    Color textColor;
+    Color backgroundColor;
+};
+DEPTHAI_SERIALIZE_EXT(TextAnnotation, position, text, fontSize, textColor, backgroundColor);
+struct ImageAnnotation {
+    std::vector<CircleAnnotation> circles;
+    std::vector<PointsAnnotation> points;
+    std::vector<TextAnnotation> texts;
+};
+
+DEPTHAI_SERIALIZE_EXT(ImageAnnotation, circles, points, texts);
+
+/**
+ * ImageAnnotations message. Carries annotations for an image.
+ */
+class ImageAnnotations : public Buffer, public utility::ProtoSerializable {
+   public:
+    /**
+     * Construct ImageAnnotations message.
+     */
+    ImageAnnotations() = default;
+    explicit ImageAnnotations(std::vector<ImageAnnotation> annotations) : annotations(std::move(annotations)) {}
+
+    virtual ~ImageAnnotations() = default;
+
+    /// Transform
+    std::vector<ImageAnnotation> annotations;
+
+    void serialize(std::vector<std::uint8_t>& metadata, DatatypeEnum& datatype) const override {
+        metadata = utility::serialize(*this);
+        datatype = DatatypeEnum::ImageAnnotations;
+    };
+    std::unique_ptr<google::protobuf::Message> getProtoMessage(bool = true) const override {
+        auto imageAnnotations = std::make_unique<proto::image_annotations::ImageAnnotations>();
+
+        imageAnnotations->set_sequencenum(this->sequenceNum);
+        proto::common::Timestamp* ts = imageAnnotations->mutable_ts();
+        ts->set_sec(this->ts.sec);
+        ts->set_nsec(this->ts.nsec);
+        proto::common::Timestamp* tsDevice = imageAnnotations->mutable_tsdevice();
+        tsDevice->set_sec(this->tsDevice.sec);
+        tsDevice->set_nsec(this->tsDevice.nsec);
+
+        for(const auto& annotation : this->annotations) {
+            proto::image_annotations::ImageAnnotation* imageAnnotation = imageAnnotations->add_annotations();
+            for(const auto& circle : annotation.circles) {
+                proto::image_annotations::CircleAnnotation* circleAnnotation = imageAnnotation->add_circles();
+                circleAnnotation->mutable_position()->set_x(circle.position.x);
+                circleAnnotation->mutable_position()->set_y(circle.position.y);
+                circleAnnotation->set_diameter(circle.diameter);
+                circleAnnotation->set_thickness(circle.thickness);
+                circleAnnotation->mutable_fillcolor()->set_r(circle.fillColor.r);
+                circleAnnotation->mutable_fillcolor()->set_g(circle.fillColor.g);
+                circleAnnotation->mutable_fillcolor()->set_b(circle.fillColor.b);
+                circleAnnotation->mutable_fillcolor()->set_a(circle.fillColor.a);
+                circleAnnotation->mutable_outlinecolor()->set_r(circle.outlineColor.r);
+                circleAnnotation->mutable_outlinecolor()->set_g(circle.outlineColor.g);
+                circleAnnotation->mutable_outlinecolor()->set_b(circle.outlineColor.b);
+                circleAnnotation->mutable_outlinecolor()->set_a(circle.outlineColor.a);
+            }
+            for(const auto& points : annotation.points) {
+                proto::image_annotations::PointsAnnotation* pointsAnnotation = imageAnnotation->add_points();
+                PointsAnnotationType type = points.type;
+                pointsAnnotation->set_type(static_cast<proto::image_annotations::PointsAnnotationType>(type));
+                for(const auto& point : points.points) {
+                    proto::common::Point2f* protoPoint = pointsAnnotation->add_points();
+                    protoPoint->set_x(point.x);
+                    protoPoint->set_y(point.y);
+                }
+                pointsAnnotation->mutable_outlinecolor()->set_r(points.outlineColor.r);
+                pointsAnnotation->mutable_outlinecolor()->set_g(points.outlineColor.g);
+                pointsAnnotation->mutable_outlinecolor()->set_b(points.outlineColor.b);
+                pointsAnnotation->mutable_outlinecolor()->set_a(points.outlineColor.a);
+                for(const auto& color : points.outlineColors) {
+                    proto::common::Color* protoColor = pointsAnnotation->add_outlinecolors();
+                    protoColor->set_r(color.r);
+                    protoColor->set_g(color.g);
+                    protoColor->set_b(color.b);
+                    protoColor->set_a(color.a);
+                }
+                pointsAnnotation->mutable_fillcolor()->set_r(points.fillColor.r);
+                pointsAnnotation->mutable_fillcolor()->set_g(points.fillColor.g);
+                pointsAnnotation->mutable_fillcolor()->set_b(points.fillColor.b);
+                pointsAnnotation->mutable_fillcolor()->set_a(points.fillColor.a);
+                pointsAnnotation->set_thickness(points.thickness);
+            }
+            for(const auto& text : annotation.texts) {
+                proto::image_annotations::TextAnnotation* textAnnotation = imageAnnotation->add_texts();
+                textAnnotation->mutable_position()->set_x(text.position.x);
+                textAnnotation->mutable_position()->set_y(text.position.y);
+                textAnnotation->set_text(text.text);
+                textAnnotation->set_fontsize(text.fontSize);
+                textAnnotation->mutable_textcolor()->set_r(text.textColor.r);
+                textAnnotation->mutable_textcolor()->set_g(text.textColor.g);
+                textAnnotation->mutable_textcolor()->set_b(text.textColor.b);
+                textAnnotation->mutable_textcolor()->set_a(text.textColor.a);
+                textAnnotation->mutable_backgroundcolor()->set_r(text.backgroundColor.r);
+                textAnnotation->mutable_backgroundcolor()->set_g(text.backgroundColor.g);
+                textAnnotation->mutable_backgroundcolor()->set_b(text.backgroundColor.b);
+                textAnnotation->mutable_backgroundcolor()->set_a(text.backgroundColor.a);
+            }
+        }
+        return imageAnnotations;
+    }
+
+    DEPTHAI_SERIALIZE(ImageAnnotations, Buffer::sequenceNum, Buffer::ts, Buffer::tsDevice, annotations);
+};
+
+}  // namespace dai
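A short sketch of building one of these messages on the host, for example to accompany a frame into a visualizer or a recording. All field names come from the structs above; the values, and the assumption that `dai::Color` aggregate-initializes from r/g/b/a, are illustrative:

dai::ImageAnnotation annotation;

dai::CircleAnnotation circle;
circle.position = dai::Point2f(0.5f, 0.5f);  // normalized center
circle.diameter = 0.1f;
circle.thickness = 2.0f;
circle.outlineColor = dai::Color{1.0f, 0.0f, 0.0f, 1.0f};
annotation.circles.push_back(circle);

dai::TextAnnotation label;
label.position = dai::Point2f(0.5f, 0.4f);
label.text = "target";
label.fontSize = 24.0f;
annotation.texts.push_back(label);

auto msg = std::make_shared<dai::ImageAnnotations>(std::vector<dai::ImageAnnotation>{annotation});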
diff --git a/include/depthai/pipeline/datatype/ImgAnnotations.hpp b/include/depthai/pipeline/datatype/ImgAnnotations.hpp
index fcc951f8f..362f461df 100644
--- a/include/depthai/pipeline/datatype/ImgAnnotations.hpp
+++ b/include/depthai/pipeline/datatype/ImgAnnotations.hpp
@@ -72,7 +72,7 @@ class ImgAnnotations : public Buffer, public ProtoSerializable {
      *
      * @returns serialized message
      */
-    std::vector<std::uint8_t> serializeProto() const override;
+    std::vector<std::uint8_t> serializeProto(bool = false) const override;

     /**
      * Serialize schema to proto buffer
diff --git a/include/depthai/pipeline/datatype/ImgDetections.hpp b/include/depthai/pipeline/datatype/ImgDetections.hpp
index 11550f56e..53b5fb26f 100644
--- a/include/depthai/pipeline/datatype/ImgDetections.hpp
+++ b/include/depthai/pipeline/datatype/ImgDetections.hpp
@@ -46,7 +46,7 @@ class ImgDetections : public Buffer, public ProtoSerializable {
      *
      * @returns serialized message
      */
-    std::vector<std::uint8_t> serializeProto() const override;
+    std::vector<std::uint8_t> serializeProto(bool = false) const override;

     /**
      * Serialize schema to proto buffer
diff --git a/include/depthai/pipeline/datatype/ImgFrame.hpp b/include/depthai/pipeline/datatype/ImgFrame.hpp
index 5abafe9f1..e26a9e462 100644
--- a/include/depthai/pipeline/datatype/ImgFrame.hpp
+++ b/include/depthai/pipeline/datatype/ImgFrame.hpp
@@ -90,7 +90,7 @@ class ImgFrame : public Buffer, public ProtoSerializable {
      *
      * @returns serialized message
      */
-    std::vector<std::uint8_t> serializeProto() const override;
+    std::vector<std::uint8_t> serializeProto(bool metadataOnly = false) const override;

     /**
      * Serialize schema to proto buffer
diff --git a/include/depthai/pipeline/datatype/PointCloudData.hpp b/include/depthai/pipeline/datatype/PointCloudData.hpp
index de06cba01..baf5addb4 100644
--- a/include/depthai/pipeline/datatype/PointCloudData.hpp
+++ b/include/depthai/pipeline/datatype/PointCloudData.hpp
@@ -195,7 +195,7 @@ class PointCloudData : public Buffer, public ProtoSerializable {
      *
      * @returns serialized message
      */
-    std::vector<std::uint8_t> serializeProto() const override;
+    std::vector<std::uint8_t> serializeProto(bool metadataOnly = false) const override;

     /**
      * Serialize schema to proto buffer
diff --git a/include/depthai/pipeline/datatype/SpatialImgDetections.hpp b/include/depthai/pipeline/datatype/SpatialImgDetections.hpp
index 1aa097c55..41afd0634 100644
--- a/include/depthai/pipeline/datatype/SpatialImgDetections.hpp
+++ b/include/depthai/pipeline/datatype/SpatialImgDetections.hpp
@@ -54,7 +54,7 @@ class SpatialImgDetections : public Buffer, public ProtoSerializable {
      *
      * @returns serialized message
      */
-    std::vector<std::uint8_t> serializeProto() const override;
+    std::vector<std::uint8_t> serializeProto(bool = false) const override;

     /**
      * Serialize schema to proto buffer
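The `metadataOnly` flag threads through every `ProtoSerializable` datatype. For payload-heavy messages such as `ImgFrame` the apparent intent is to let a recorder persist timestamps, camera settings and transformations without the pixel data, which holistic record stores separately as encoded video. A sketch of the two modes under that assumption:

auto frame = videoQueue->get<dai::ImgFrame>();

// Full serialization: metadata plus payload
std::vector<std::uint8_t> full = frame->serializeProto();

// Metadata only: suited to an .mcap sidecar next to the encoded .mp4
std::vector<std::uint8_t> meta = frame->serializeProto(true);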
diff --git a/include/depthai/pipeline/node/Camera.hpp b/include/depthai/pipeline/node/Camera.hpp
index ff697b6a5..62d7aebeb 100644
--- a/include/depthai/pipeline/node/Camera.hpp
+++ b/include/depthai/pipeline/node/Camera.hpp
@@ -11,6 +11,7 @@
 #include "depthai/pipeline/datatype/CameraControl.hpp"
 #include "depthai/properties/CameraProperties.hpp"
 #include "depthai/utility/span.hpp"
+#include "depthai/pipeline/node/host/Replay.hpp"

 namespace dai {
 namespace node {
@@ -40,6 +41,16 @@ class Camera : public DeviceNodeCRTP<DeviceNode, Camera, CameraProperties>, public SourceNode {
      */
     std::shared_ptr<Camera> build(dai::CameraBoardSocket boardSocket = dai::CameraBoardSocket::AUTO);

+    /**
+     * Build with a specific board socket and mock input
+     */
+    std::shared_ptr<Camera> build(dai::CameraBoardSocket boardSocket, ReplayVideo& replay);
+
+    /**
+     * Build with mock input
+     */
+    std::shared_ptr<Camera> build(ReplayVideo& replay);
+
     /**
      * Get max width of the camera (can only be called after build)
      */
@@ -61,12 +72,24 @@ class Camera : public DeviceNodeCRTP<DeviceNode, Camera, CameraProperties>, public SourceNode {
     Input inputControl{
         *this, {"inputControl", DEFAULT_GROUP, DEFAULT_BLOCKING, DEFAULT_QUEUE_SIZE, {{{DatatypeEnum::CameraControl, false}}}, DEFAULT_WAIT_FOR_MESSAGE}};

+    /**
+     * Input for mocking 'isp' functionality on RVC2.
+     * Default queue is blocking with size 8
+     */
+    Input mockIsp{*this, {"mockIsp", DEFAULT_GROUP, true, 8, {{{DatatypeEnum::ImgFrame, false}}}, DEFAULT_WAIT_FOR_MESSAGE}};
+
     /**
      * Retrieves which board socket to use
      * @returns Board socket to use
      */
     CameraBoardSocket getBoardSocket() const;

+    /**
+     * Set mock ISP for Camera node. Automatically sets mockIsp size.
+     * @param replay ReplayVideo node to use as mock ISP
+     */
+    Camera& setMockIsp(ReplayVideo& replay);
+
    private:
     class Impl;
     spimpl::impl_ptr<Impl> pimpl;
@@ -91,10 +114,15 @@ class Camera : public DeviceNodeCRTP<DeviceNode, Camera, CameraProperties>, public SourceNode {

     void buildStage1() override;

+    float getMaxRequestedFps() const;
+    uint32_t getMaxRequestedWidth() const;
+    uint32_t getMaxRequestedHeight() const;
+
    protected:
     Properties& getProperties() override;
     bool isSourceNode() const override;
     NodeRecordParams getNodeRecordParams() const override;
+    Input& getReplayInput() override;

    private:
     bool isBuilt = false;
diff --git a/include/depthai/properties/CameraProperties.hpp b/include/depthai/properties/CameraProperties.hpp
index 5c61172f9..eeb454aec 100644
--- a/include/depthai/properties/CameraProperties.hpp
+++ b/include/depthai/properties/CameraProperties.hpp
@@ -118,6 +118,11 @@ struct CameraProperties : PropertiesSerializable<Properties, CameraProperties> {
      */
     int32_t mockIspHeight = AUTO;

+    /**
+     * Select the mock isp fps. Overrides fps if mockIsp is connected.
+ */ + float mockIspFps = AUTO; + /** * Camera sensor FPS */ diff --git a/include/depthai/utility/ProtoSerializable.hpp b/include/depthai/utility/ProtoSerializable.hpp index e2afea9f5..aed9478b3 100644 --- a/include/depthai/utility/ProtoSerializable.hpp +++ b/include/depthai/utility/ProtoSerializable.hpp @@ -3,6 +3,7 @@ #include #include #include + namespace dai { class ProtoSerializable { @@ -19,7 +20,7 @@ class ProtoSerializable { * @brief Serialize the protobuf message of this object * @return serialized protobuf message */ - virtual std::vector serializeProto() const = 0; + virtual std::vector serializeProto(bool metadataOnly = false) const = 0; /** * @brief Serialize the schema of this object @@ -56,4 +57,4 @@ class ProtoSerializable { #endif }; -} // namespace dai \ No newline at end of file +} // namespace dai diff --git a/include/depthai/utility/RecordReplay.hpp b/include/depthai/utility/RecordReplay.hpp index 418b177d4..e7bda7ef7 100644 --- a/include/depthai/utility/RecordReplay.hpp +++ b/include/depthai/utility/RecordReplay.hpp @@ -18,7 +18,6 @@ #ifndef MCAP_COMPRESSION_NO_ZSTD #define MCAP_COMPRESSION_NO_ZSTD #endif -#include "RecordReplaySchema.hpp" namespace dai { @@ -33,7 +32,7 @@ struct RecordConfig { struct VideoEncoding { bool enabled = true; int bitrate = 0; - Profile profile = Profile::H264_MAIN; + Profile profile = Profile::MJPEG; bool lossless = false; int quality = 80; }; diff --git a/include/depthai/utility/RecordReplaySchema.hpp b/include/depthai/utility/RecordReplaySchema.hpp deleted file mode 100644 index fe241532c..000000000 --- a/include/depthai/utility/RecordReplaySchema.hpp +++ /dev/null @@ -1,695 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include - -#include "depthai/pipeline/datatype/IMUData.hpp" -#include "depthai/pipeline/datatype/ImgFrame.hpp" - -namespace dai { -namespace utility { - -enum class RecordType { Other = 0, Video = 1, Imu = 2 }; - -struct VersionSchema { - uint16_t major; - uint16_t minor; - uint16_t patch; -}; - -NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(VersionSchema, major, minor, patch) - -struct TimestampSchema { - uint64_t seconds; - uint32_t nanoseconds; - - void set(std::chrono::nanoseconds time) { - seconds = std::chrono::duration_cast(time).count(); - nanoseconds = std::chrono::duration_cast(time).count() % 1000000000; - } - - std::chrono::nanoseconds get() const { - return std::chrono::seconds(seconds) + std::chrono::nanoseconds(nanoseconds); - } -}; - -NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(TimestampSchema, seconds, nanoseconds) - -// Make sure these structs are in sync with the JSON schema -struct DefaultRecordSchema { - VersionSchema version{0, 0, 1}; - RecordType type = RecordType::Other; - TimestampSchema timestamp; - uint64_t sequenceNumber; - std::vector data; -}; - -NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(DefaultRecordSchema, version, type, timestamp, sequenceNumber, data) - -struct IMUReportSchema { - // TODO(asahtik): This does not seem like a lasting way - // of representing the accuracy - enum class Accuracy : std::uint8_t { - UNRELIABLE = 0, - LOW = 1, - MEDIUM = 2, - HIGH = 3, - COVAR = 4, - }; - - TimestampSchema timestamp; - uint64_t sequenceNumber; - - Accuracy accuracy; - std::array covariance; -}; - -struct VectorSchema { - float x; - float y; - float z; -}; - -struct QuaternionSchema { - float i; - float j; - float k; - float real; -}; - -struct IMUVectorSchema : public IMUReportSchema, public VectorSchema {}; - -NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(IMUVectorSchema, x, y, z, timestamp, sequenceNumber, 
accuracy) - -struct IMUQuaternionSchema : public IMUReportSchema, public QuaternionSchema { - float rotationAccuracy; -}; - -NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(IMUQuaternionSchema, i, j, k, real, rotationAccuracy, timestamp, sequenceNumber, covariance, accuracy) - -struct IMUPacketSchema { - IMUVectorSchema orientation; - IMUVectorSchema acceleration; - IMUVectorSchema magneticField; - IMUQuaternionSchema rotationVector; -}; - -NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(IMUPacketSchema, orientation, acceleration, magneticField, rotationVector) - -struct IMURecordSchema { - VersionSchema version{0, 0, 1}; - RecordType type = RecordType::Imu; - std::vector packets; - - dai::IMUData getMessage() { - IMUData imuData; - imuData.packets.reserve(packets.size()); - auto maxTimestamp = packets.size() > 0 ? packets.front().acceleration.timestamp.get() : std::chrono::nanoseconds{0}; - auto maxSeqNo = packets.size() > 0 ? packets.front().acceleration.sequenceNumber : 0; - for(const auto& packet : packets) { - IMUPacket imuPacket; - imuPacket.acceleroMeter.tsDevice.sec = packet.acceleration.timestamp.seconds; - imuPacket.acceleroMeter.tsDevice.nsec = packet.acceleration.timestamp.nanoseconds; - imuPacket.acceleroMeter.sequence = packet.acceleration.sequenceNumber; - imuPacket.acceleroMeter.accuracy = (IMUReport::Accuracy)packet.acceleration.accuracy; - imuPacket.acceleroMeter.x = packet.acceleration.x; - imuPacket.acceleroMeter.y = packet.acceleration.y; - imuPacket.acceleroMeter.z = packet.acceleration.z; - - imuPacket.gyroscope.tsDevice.sec = packet.orientation.timestamp.seconds; - imuPacket.gyroscope.tsDevice.nsec = packet.orientation.timestamp.nanoseconds; - imuPacket.gyroscope.sequence = packet.orientation.sequenceNumber; - imuPacket.gyroscope.accuracy = (IMUReport::Accuracy)packet.orientation.accuracy; - imuPacket.gyroscope.x = packet.orientation.x; - imuPacket.gyroscope.y = packet.orientation.y; - imuPacket.gyroscope.z = packet.orientation.z; - - imuPacket.magneticField.tsDevice.sec = packet.magneticField.timestamp.seconds; - imuPacket.magneticField.tsDevice.nsec = packet.magneticField.timestamp.nanoseconds; - imuPacket.magneticField.sequence = packet.magneticField.sequenceNumber; - imuPacket.magneticField.accuracy = (IMUReport::Accuracy)packet.magneticField.accuracy; - imuPacket.magneticField.x = packet.magneticField.x; - imuPacket.magneticField.y = packet.magneticField.y; - imuPacket.magneticField.z = packet.magneticField.z; - - imuPacket.rotationVector.tsDevice.sec = packet.rotationVector.timestamp.seconds; - imuPacket.rotationVector.tsDevice.nsec = packet.rotationVector.timestamp.nanoseconds; - imuPacket.rotationVector.sequence = packet.rotationVector.sequenceNumber; - imuPacket.rotationVector.accuracy = (IMUReport::Accuracy)packet.rotationVector.accuracy; - imuPacket.rotationVector.i = packet.rotationVector.i; - imuPacket.rotationVector.j = packet.rotationVector.j; - imuPacket.rotationVector.k = packet.rotationVector.k; - imuPacket.rotationVector.real = packet.rotationVector.real; - imuPacket.rotationVector.rotationVectorAccuracy = packet.rotationVector.rotationAccuracy; - - imuData.packets.push_back(imuPacket); - std::max({maxTimestamp, - packet.rotationVector.timestamp.get(), - packet.orientation.timestamp.get(), - packet.acceleration.timestamp.get(), - packet.magneticField.timestamp.get()}); - std::max({maxSeqNo, - packet.rotationVector.sequenceNumber, - packet.orientation.sequenceNumber, - packet.acceleration.sequenceNumber, - packet.magneticField.sequenceNumber}); - } - 
imuData.setTimestampDevice(std::chrono::steady_clock::time_point(maxTimestamp)); - imuData.setSequenceNum(maxSeqNo); - return imuData; - } -}; - -NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(IMURecordSchema, version, type, packets) - -struct VideoCameraSettingsSchema { - int32_t exposure; - int32_t sensitivity; - int32_t lensPosition; - int32_t wbColorTemp; -}; - -NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(VideoCameraSettingsSchema, exposure, sensitivity, lensPosition, wbColorTemp) - -struct VideoRecordSchema { - VersionSchema version{0, 0, 1}; - RecordType type = RecordType::Video; - TimestampSchema timestamp; - uint64_t sequenceNumber; - uint64_t instanceNumber; - uint32_t width; - uint32_t height; - VideoCameraSettingsSchema cameraSettings; - - dai::ImgFrame getMessage() { - ImgFrame imgFrame; - imgFrame.setWidth(width); - imgFrame.setHeight(height); - imgFrame.setTimestampDevice(std::chrono::time_point(timestamp.get())); - imgFrame.setSequenceNum(sequenceNumber); - imgFrame.setInstanceNum(instanceNumber); - imgFrame.cam.wbColorTemp = cameraSettings.wbColorTemp; - imgFrame.cam.lensPosition = cameraSettings.lensPosition; - imgFrame.cam.exposureTimeUs = cameraSettings.exposure; - imgFrame.cam.sensitivityIso = cameraSettings.sensitivity; - imgFrame.transformation = ImgTransformation(width, height); - return imgFrame; - } -}; - -NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(VideoRecordSchema, version, type, timestamp, sequenceNumber, instanceNumber, width, height, cameraSettings) - -constexpr const char* DEFAULT_SHEMA = R"( -{ - "title": "Default record meta", - "description": "Metadata for an unspecified record stream", - "type": "object", - "properties": { - "version": { - "type": "object", - "description": "Metadata version", - "properties": { - "major": { - "type": "integer", - "description": "Major version" - }, - "minor": { - "type": "integer", - "description": "Minor version" - }, - "patch": { - "type": "integer", - "description": "Patch version" - } - } - }, - "type": { - "description": "Type of the record", - "oneOf": [ - { - "title": "Other", - "const": 0 - }, - { - "title": "Video", - "const": 1 - }, - { - "title": "Imu", - "const": 2 - } - ] - }, - "timestamp": { - "type": "object", - "description": "Timestamp of the message", - "properties": { - "seconds": { - "type": "integer", - "description": "Seconds part of the timestamp" - }, - "nanoseconds": { - "type": "integer", - "description": "Nanoseconds part of the timestamp" - } - } - }, - "sequenceNumber": { - "type": "integer", - "description": "Sequence number of the message" - }, - "data": { - "type": "bytes", - "description": "Buffer data" - } - } -} -)"; - -// TODO(asahtik): Add values, maybe covariance -constexpr const char* IMU_SHEMA = R"( -{ - "title": "IMU record meta", - "description": "Metadata for an IMU record stream", - "type": "object", - "properties": { - "version": { - "type": "object", - "description": "Metadata version", - "properties": { - "major": { - "type": "integer", - "description": "Major version" - }, - "minor": { - "type": "integer", - "description": "Minor version" - }, - "patch": { - "type": "integer", - "description": "Patch version" - } - } - }, - "type": { - "description": "Type of the record", - "oneOf": [ - { - "title": "Other", - "const": 0 - }, - { - "title": "Video", - "const": 1 - }, - { - "title": "Imu", - "const": 2 - } - ] - }, - "packets": { - "type": "array", - "description": "IMU packets", - "items": { - "type": "object", - "description": "IMU packet", - "properties": { - "orientation": { - "type": "object", - 
"description": "Gyroscope orientation", - "properties": { - "timestamp": { - "type": "object", - "description": "Timestamp of the message", - "properties": { - "seconds": { - "type": "integer", - "description": "Seconds part of the timestamp" - }, - "nanoseconds": { - "type": "integer", - "description": "Nanoseconds part of the timestamp" - } - } - }, - "sequenceNumber": { - "type": "integer", - "description": "Sequence number of the message" - }, - "accuracy": { - "description": "Accuracy of the measurement", - "oneOf": [ - { - "title": "Unreliable", - "const": 0 - }, - { - "title": "Low", - "const": 1 - }, - { - "title": "Medium", - "const": 2 - }, - { - "title": "High", - "const": 3 - }, - { - "title": "Covariance", - "const": 4 - } - - ] - }, - "covariance": { - "type": "array", - "description": "Covariance matrix", - "items": { - "type": "number" - } - }, - "x": { - "type": "number", - "description": "X axis value" - }, - "y": { - "type": "number", - "description": "Y axis value" - }, - "z": { - "type": "number", - "description": "Z axis value" - } - } - }, - "acceleration": { - "type": "object", - "description": "Acceleration vector", - "properties": { - "timestamp": { - "type": "object", - "description": "Timestamp of the message", - "properties": { - "seconds": { - "type": "integer", - "description": "Seconds part of the timestamp" - }, - "nanoseconds": { - "type": "integer", - "description": "Nanoseconds part of the timestamp" - } - } - }, - "sequenceNumber": { - "type": "integer", - "description": "Sequence number of the message" - }, - "accuracy": { - "description": "Accuracy of the measurement", - "oneOf": [ - { - "title": "Unreliable", - "const": 0 - }, - { - "title": "Low", - "const": 1 - }, - { - "title": "Medium", - "const": 2 - }, - { - "title": "High", - "const": 3 - } - ] - }, - "covariance": { - "type": "array", - "description": "Covariance matrix", - "items": { - "type": "number" - } - }, - "x": { - "type": "number", - "description": "X axis value" - }, - "y": { - "type": "number", - "description": "Y axis value" - }, - "z": { - "type": "number", - "description": "Z axis value" - } - } - }, - "magneticField": { - "type": "object", - "description": "Magnetic field orientation", - "properties": { - "timestamp": { - "type": "object", - "description": "Timestamp of the message", - "properties": { - "seconds": { - "type": "integer", - "description": "Seconds part of the timestamp" - }, - "nanoseconds": { - "type": "integer", - "description": "Nanoseconds part of the timestamp" - } - } - }, - "sequenceNumber": { - "type": "integer", - "description": "Sequence number of the message" - }, - "accuracy": { - "description": "Accuracy of the measurement", - "oneOf": [ - { - "title": "Unreliable", - "const": 0 - }, - { - "title": "Low", - "const": 1 - }, - { - "title": "Medium", - "const": 2 - }, - { - "title": "High", - "const": 3 - } - ] - }, - "covariance": { - "type": "array", - "description": "Covariance matrix", - "items": { - "type": "number" - } - }, - "x": { - "type": "number", - "description": "X axis value" - }, - "y": { - "type": "number", - "description": "Y axis value" - }, - "z": { - "type": "number", - "description": "Z axis value" - } - } - }, - "rotationVector": { - "type": "object", - "description": "Rotation vector with accuracy", - "properties": { - "timestamp": { - "type": "object", - "description": "Timestamp of the message", - "properties": { - "seconds": { - "type": "integer", - "description": "Seconds part of the timestamp" - }, - "nanoseconds": { - 
"type": "integer", - "description": "Nanoseconds part of the timestamp" - } - } - }, - "sequenceNumber": { - "type": "integer", - "description": "Sequence number of the message" - }, - "accuracy": { - "description": "Accuracy of the measurement", - "oneOf": [ - { - "title": "Unreliable", - "const": 0 - }, - { - "title": "Low", - "const": 1 - }, - { - "title": "Medium", - "const": 2 - }, - { - "title": "High", - "const": 3 - } - ] - }, - "covariance": { - "type": "array", - "description": "Covariance matrix", - "items": { - "type": "number" - } - }, - "i": { - "type": "number", - "description": "i axis quaterion value" - }, - "j": { - "type": "number", - "description": "j axis quaternion value" - }, - "k": { - "type": "number", - "description": "k axis quaternion value" - }, - "real": { - "type": "number", - "description": "Quaternion scale value" - }, - "rotationAccuracy": { - "type": "number", - "description": "Accuracy of the rotation vector in radians" - } - } - } - } - } - } - } -} -)"; - -constexpr const char* VIDEO_SHEMA = R"( -{ - "title": "Default record meta", - "description": "Metadata for an unspecified record stream", - "type": "object", - "properties": { - "version": { - "type": "object", - "description": "Metadata version", - "properties": { - "major": { - "type": "integer", - "description": "Major version" - }, - "minor": { - "type": "integer", - "description": "Minor version" - }, - "patch": { - "type": "integer", - "description": "Patch version" - } - } - }, - "type": { - "description": "Type of the record", - "oneOf": [ - { - "title": "Other", - "const": 0 - }, - { - "title": "Video", - "const": 1 - }, - { - "title": "Imu", - "const": 2 - } - ] - }, - "timestamp": { - "type": "object", - "description": "Timestamp of the message", - "properties": { - "seconds": { - "type": "integer", - "description": "Seconds part of the timestamp" - }, - "nanoseconds": { - "type": "integer", - "description": "Nanoseconds part of the timestamp" - } - } - }, - "sequenceNumber": { - "type": "integer", - "description": "Sequence number of the message" - }, - "instanceNumber": { - "type": "integer", - "description": "Instance number of the source" - }, - "width": { - "type": "integer", - "description": "Frame width" - }, - "height": { - "type": "integer", - "description": "Frame height" - }, - "cameraSettings": { - "type": "object", - "description": "Camera settings", - "properties": { - "exposure": { - "type": "number", - "description": "Exposure time in microseconds" - }, - "sensitivity": { - "type": "number", - "description": "Sensitivity in ISO" - }, - "lensPosition": { - "type": "number", - "description": "Lens position" - }, - "wbColorTemp": { - "type": "number", - "description": "Lens position" - } - } - } - } -} -)"; - -} // namespace utility -} // namespace dai diff --git a/protos/EncodedFrame.proto b/protos/EncodedFrame.proto index b91b9ae1a..fa1e5e0f7 100644 --- a/protos/EncodedFrame.proto +++ b/protos/EncodedFrame.proto @@ -6,22 +6,21 @@ import "common.proto"; message EncodedFrame { common.CameraSettings cam = 1; - uint32 instanceNum = 2; // Which source created this frame (color, mono, ...) 
-  uint32 width = 3;   // width in pixels
-  uint32 height = 4;  // height in pixels
-  uint32 quality = 5;  // Encoding quality
-  uint32 bitrate = 6;  // Encoding bitrate
-  Profile profile = 7;  // Encoding profile (JPEG, AVC, HEVC)
-  bool lossless = 8;  // JPEG: true if lossless
-  FrameType type = 9;  // H264: frame type (I, P, B)
-  uint32 frameOffset = 10;  // Frame offset
-  uint32 frameSize = 11;  // Frame size
-  int64 sequenceNum = 12;  // Sequence number
-  common.Timestamp ts = 13;  // Timestamp
+  uint32 width = 3;                // width in pixels
+  uint32 height = 4;               // height in pixels
+  uint32 quality = 5;              // Encoding quality
+  uint32 bitrate = 6;              // Encoding bitrate
+  Profile profile = 7;             // Encoding profile (JPEG, AVC, HEVC)
+  bool lossless = 8;               // JPEG: true if lossless
+  FrameType type = 9;              // H264: frame type (I, P, B)
+  uint32 frameOffset = 10;         // Frame offset
+  uint32 frameSize = 11;           // Frame size
+  int64 sequenceNum = 12;          // Sequence number
+  common.Timestamp ts = 13;        // Timestamp
   common.Timestamp tsDevice = 14;  // Timestamp from device
   common.ImgTransformation transformation = 15;
-  bytes data = 16;  // Encoded frame data
-
+  bytes data = 16;                 // Encoded frame data
 }

 enum Profile {
diff --git a/protos/IMUData.proto b/protos/IMUData.proto
new file mode 100644
index 000000000..8143fab70
--- /dev/null
+++ b/protos/IMUData.proto
@@ -0,0 +1,57 @@
+syntax = "proto3";
+
+package dai.proto.imu_data;
+
+import "common.proto";
+
+message IMUVec {
+    float x = 1;
+    float y = 2;
+    float z = 3;
+}
+
+message IMUQuat {
+    float x = 1;
+    float y = 2;
+    float z = 3;
+    float w = 4;
+}
+
+enum Accuracy {
+    UNRELIABLE = 0;
+    LOW = 1;
+    MEDIUM = 2;
+    HIGH = 3;
+}
+
+message IMUReport {
+    int32 sequence = 1;
+    Accuracy accuracy = 2;
+    common.Timestamp ts = 3;
+    common.Timestamp tsDevice = 4;
+}
+
+message IMUReportVec {
+    IMUReport report = 1;
+    IMUVec vec = 2;
+}
+
+message IMUReportQuatWAcc {
+    IMUReport report = 1;
+    IMUQuat quat = 2;
+    float rotationVectorAccuracy = 3;
+}
+
+message IMUPacket {
+    IMUReportVec accelerometer = 1;
+    IMUReportVec gyroscope = 2;
+    IMUReportVec magnetometer = 3;
+    IMUReportQuatWAcc rotationVector = 4;
+}
+
+message IMUData {
+    repeated IMUPacket packets = 1;
+    int64 sequenceNum = 2;          // Sequence number
+    common.Timestamp ts = 3;        // Timestamp
+    common.Timestamp tsDevice = 4;  // Timestamp from device
+}
diff --git a/src/opencv/HolisticRecordReplay.cpp b/src/opencv/HolisticRecordReplay.cpp
index cec216f37..28c9ea898 100644
--- a/src/opencv/HolisticRecordReplay.cpp
+++ b/src/opencv/HolisticRecordReplay.cpp
@@ -21,10 +21,18 @@
 namespace dai {
 namespace utility {

+inline size_t roundDown(size_t numToRound, size_t multiple) {
+    return numToRound - numToRound % multiple;
+}
+inline size_t roundUp(size_t numToRound, size_t multiple) {
+    return roundDown(numToRound + multiple - 1UL, multiple);
+}
+
 bool setupHolisticRecord(Pipeline& pipeline,
                          const std::string& deviceId,
                          RecordConfig& recordConfig,
-                         std::unordered_map<std::string, std::string>& outFilenames) {
+                         std::unordered_map<std::string, std::string>& outFilenames,
+                         bool legacy) {
     auto sources = pipeline.getSourceNodes();
     const auto recordPath = recordConfig.outputDir;
     try {
@@ -40,9 +48,45 @@ bool setupHolisticRecord(Pipeline& pipeline,
             outFilenames[nodeName] = filePath;
             if(std::dynamic_pointer_cast<node::ColorCamera>(node) != nullptr || std::dynamic_pointer_cast<node::MonoCamera>(node) != nullptr
                || std::dynamic_pointer_cast<node::Camera>(node) != nullptr) {
+                Node::Output* output;
+                size_t camWidth = 1920, camHeight = 1080;
                 if(std::dynamic_pointer_cast<node::Camera>(node) != nullptr) {
-                    // TODO(asahtik)
-                    throw std::runtime_error("Holistic record with Camera node is not yet supported.");
+                    auto cam = std::dynamic_pointer_cast<node::Camera>(node);
+                    auto fps = cam->getMaxRequestedFps();
+                    size_t requestWidth = cam->getMaxRequestedWidth();
+                    size_t requestHeight = cam->getMaxRequestedHeight();
+                    size_t width = cam->getMaxWidth();
+                    size_t height = cam->getMaxHeight();
+                    auto cams = pipeline.getDefaultDevice()->getConnectedCameraFeatures();
+                    for(const auto& cf : cams) {
+                        if(cf.socket == cam->getBoardSocket()) {
+                            for(const auto& cfg : cf.configs) {
+                                if(cfg.width > (int32_t)requestWidth && cfg.height > (int32_t)requestHeight) {
+                                    width = std::min((size_t)cfg.width, width);
+                                    height = std::min((size_t)cfg.height, height);
+                                }
+                            }
+                            break;
+                        }
+                    }
+                    if(legacy) {
+                        if(width % 32 != 0UL) {
+                            auto down = roundDown(width, 32);
+                            width = down < requestWidth ? roundUp(width, 32) : down;
+                        }
+                        if(height % 8 != 0UL) {
+                            auto down = roundDown(height, 8);
+                            height = down < requestHeight ? roundUp(height, 8) : down;
+                        }
+                    }
+                    camWidth = width;
+                    camHeight = height;
+                    output = cam->requestOutput({width, height}, dai::ImgFrame::Type::NV12, dai::ImgResizeMode::CROP, fps);
+                    if(width * height > 9437184U) {
+                        recordConfig.videoEncoding.enabled = true;
+                    }
+                } else {
+                    output = &nodeS->getRecordOutput();
                 }
                 auto recordNode = pipeline.create<dai::node::RecordVideo>();
                 recordNode->setRecordMetadataFile(filePath + ".mcap");
@@ -54,8 +98,8 @@ bool setupHolisticRecord(Pipeline& pipeline,
                     videnc->setLossless(recordConfig.videoEncoding.lossless);
                     videnc->setBitrate(recordConfig.videoEncoding.bitrate);
                     videnc->setQuality(recordConfig.videoEncoding.quality);
-                    int maxOutputFrameSize = 3110400;
-                    if(std::dynamic_pointer_cast<node::ColorCamera>(node) != nullptr || std::dynamic_pointer_cast<node::Camera>(node) != nullptr) {
+                    if((std::dynamic_pointer_cast<node::ColorCamera>(node) != nullptr || std::dynamic_pointer_cast<node::Camera>(node) != nullptr) && legacy) {
+                        int maxOutputFrameSize = camWidth * camHeight * 3;
                         if(std::dynamic_pointer_cast<node::ColorCamera>(node) != nullptr) {
                             auto cam = std::dynamic_pointer_cast<node::ColorCamera>(node);
                             maxOutputFrameSize = std::get<0>(cam->getIspSize()) * std::get<1>(cam->getIspSize()) * 3;
@@ -64,14 +108,14 @@ bool setupHolisticRecord(Pipeline& pipeline,
                         imageManip->initialConfig.setFrameType(ImgFrame::Type::NV12);
                         imageManip->setMaxOutputFrameSize(maxOutputFrameSize);

-                        nodeS->getRecordOutput().link(imageManip->inputImage);
+                        output->link(imageManip->inputImage);
                         imageManip->out.link(videnc->input);
                     } else {
-                        nodeS->getRecordOutput().link(videnc->input);
+                        output->link(videnc->input);
                     }
                     videnc->out.link(recordNode->input);
                 } else {
-                    nodeS->getRecordOutput().link(recordNode->input);
+                    output->link(recordNode->input);
                 }
             } else {
                 auto recordNode = pipeline.create<dai::node::RecordMetadataOnly>();
@@ -102,7 +146,8 @@ bool setupHolisticReplay(Pipeline& pipeline,
                          std::string replayPath,
                          const std::string& deviceId,
                          RecordConfig& recordConfig,
-                         std::unordered_map<std::string, std::string>& outFilenames) {
+                         std::unordered_map<std::string, std::string>& outFilenames,
+                         bool legacy) {
     UNUSED(deviceId);
     const std::string rootPath = platform::getDirFromPath(replayPath);
     auto sources = pipeline.getSourceNodes();
@@ -222,15 +267,15 @@ bool setupHolisticReplay(Pipeline& pipeline,
                 replay->setReplayMetadataFile(platform::joinPaths(rootPath, nodeName + ".mcap"));
                 // replay->setReplayVideo(platform::joinPaths(rootPath, (mxId + "_").append(nodeName).append(".mp4")));
                 replay->setReplayVideoFile(platform::joinPaths(rootPath, nodeName + ".mp4"));
-                replay->setOutFrameType(ImgFrame::Type::YUV420p);
+                replay->setOutFrameType(legacy ?
diff --git a/src/opencv/RecordReplay.cpp b/src/opencv/RecordReplay.cpp
index 5b6c5093e..ab5b82867 100644
--- a/src/opencv/RecordReplay.cpp
+++ b/src/opencv/RecordReplay.cpp
@@ -59,7 +59,7 @@ void VideoRecorder::init(const std::string& filePath, unsigned int width, unsign
     switch(codec) {
         case VideoCodec::H264:
         case VideoCodec::MJPEG:
-            mp4Writer = MP4Create(filePath.c_str(), 0);
+            mp4Writer = MP4Create(filePath.c_str());
             if(mp4Writer == MP4_INVALID_FILE_HANDLE) {
                 throw std::runtime_error("Failed to create MP4 file");
             }
@@ -217,5 +217,17 @@ void VideoPlayer::close() {
     }
 }
 
+std::tuple getVideoSize(const std::string& filePath) {
+    cv::VideoCapture cvReader;
+    cvReader.open(filePath);
+    if(!cvReader.isOpened()) {
+        throw std::runtime_error("Failed to open video file");
+    }
+    auto width = cvReader.get(cv::CAP_PROP_FRAME_WIDTH);
+    auto height = cvReader.get(cv::CAP_PROP_FRAME_HEIGHT);
+    cvReader.release();
+    return {width, height};
+}
+
 }  // namespace utility
 }  // namespace dai
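The new getVideoSize helper probes a file's dimensions with cv::VideoCapture instead of parsing the container. A minimal standalone equivalent; the integer return type is an assumption, since the hunk above does not show the tuple's element types:

#include <stdexcept>
#include <string>
#include <tuple>

#include <opencv2/videoio.hpp>

std::tuple<int, int> probeVideoSize(const std::string& filePath) {
    cv::VideoCapture reader(filePath);
    if(!reader.isOpened()) {
        throw std::runtime_error("Failed to open video file");
    }
    // CAP_PROP_* getters return double; frame sizes fit comfortably in int.
    const int width = static_cast<int>(reader.get(cv::CAP_PROP_FRAME_WIDTH));
    const int height = static_cast<int>(reader.get(cv::CAP_PROP_FRAME_HEIGHT));
    return {width, height};
}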
diff --git a/src/pipeline/Pipeline.cpp b/src/pipeline/Pipeline.cpp
index 71cc2d6e4..f5232c5fc 100644
--- a/src/pipeline/Pipeline.cpp
+++ b/src/pipeline/Pipeline.cpp
@@ -605,7 +605,8 @@ void PipelineImpl::build() {
     std::string replayPath = utility::getEnv("DEPTHAI_REPLAY");
 
     if(defaultDevice->getDeviceInfo().platform == XLinkPlatform_t::X_LINK_MYRIAD_2
-       || defaultDevice->getDeviceInfo().platform == XLinkPlatform_t::X_LINK_MYRIAD_X) {
+       || defaultDevice->getDeviceInfo().platform == XLinkPlatform_t::X_LINK_MYRIAD_X
+       || defaultDevice->getDeviceInfo().platform == XLinkPlatform_t::X_LINK_RVC4) {
         try {
 #ifdef DEPTHAI_MERGED_TARGET
             if(enableHolisticRecordReplay) {
@@ -631,7 +632,12 @@
     } else if(!recordPath.empty()) {
         if(enableHolisticRecordReplay || utility::checkRecordConfig(recordPath, recordConfig)) {
             if(platform::checkWritePermissions(recordPath)) {
-                if(utility::setupHolisticRecord(parent, defaultDeviceId, recordConfig, recordReplayFilenames)) {
+                if(utility::setupHolisticRecord(parent,
+                                                defaultDeviceId,
+                                                recordConfig,
+                                                recordReplayFilenames,
+                                                defaultDevice->getDeviceInfo().platform == XLinkPlatform_t::X_LINK_MYRIAD_2
+                                                    || defaultDevice->getDeviceInfo().platform == XLinkPlatform_t::X_LINK_MYRIAD_X)) {
                     recordConfig.state = RecordConfig::RecordReplayState::RECORD;
                     Logging::getInstance().logger.info("Record enabled.");
                 } else {
@@ -646,7 +652,13 @@
     } else if(!replayPath.empty()) {
         if(platform::checkPathExists(replayPath)) {
             if(platform::checkWritePermissions(replayPath)) {
-                if(utility::setupHolisticReplay(parent, replayPath, defaultDeviceId, recordConfig, recordReplayFilenames)) {
+                if(utility::setupHolisticReplay(parent,
+                                                replayPath,
+                                                defaultDeviceId,
+                                                recordConfig,
+                                                recordReplayFilenames,
+                                                defaultDevice->getDeviceInfo().platform == XLinkPlatform_t::X_LINK_MYRIAD_2
+                                                    || defaultDevice->getDeviceInfo().platform == XLinkPlatform_t::X_LINK_MYRIAD_X)) {
                     recordConfig.state = RecordConfig::RecordReplayState::REPLAY;
                     if(platform::checkPathExists(replayPath, true)) {
                         removeRecordReplayFiles = false;
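The holistic paths above are selected at build() time from the environment; DEPTHAI_REPLAY is the variable read a few lines up, while the record-side variable name is not visible in this hunk. A minimal sketch of the replay side, assuming the v3 pipeline API used by the examples in this PR:

#include <cstdlib>

#include "depthai/depthai.hpp"

int main() {
    // Must be set before Pipeline::build() runs so setupHolisticReplay is invoked.
    setenv("DEPTHAI_REPLAY", "/path/to/recording", 1);

    dai::Pipeline pipeline;
    auto cam = pipeline.create<dai::node::Camera>()->build();
    auto q = cam->requestOutput({1280, 720})->createOutputQueue();

    pipeline.start();
    auto frame = q->get<dai::ImgFrame>();  // frames now come from the recording
    pipeline.stop();
    return frame != nullptr ? 0 : 1;
}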
diff --git a/src/pipeline/datatype/EncodedFrame.cpp b/src/pipeline/datatype/EncodedFrame.cpp
index 2b4933f47..58f33ba7a 100644
--- a/src/pipeline/datatype/EncodedFrame.cpp
+++ b/src/pipeline/datatype/EncodedFrame.cpp
@@ -1,6 +1,6 @@
 #include "depthai/pipeline/datatype/EncodedFrame.hpp"
 #ifdef DEPTHAI_ENABLE_PROTOBUF
-    #include "../../utility/ProtoSerialize.hpp"
+    #include "utility/ProtoSerialize.hpp"
     #include "depthai/schemas/EncodedFrame.pb.h"
 #endif
 
@@ -126,57 +126,28 @@ EncodedFrame& EncodedFrame::setProfile(Profile profile) {
     return *this;
 }
 
-#ifdef DEPTHAI_ENABLE_PROTOBUF
-std::unique_ptr getProtoMessage(const EncodedFrame* frame) {
-    // Create a unique pointer to the protobuf EncodedFrame message
-    auto encodedFrame = std::make_unique();
-
-    // Populate the protobuf message fields with the EncodedFrame data
-    encodedFrame->set_instancenum(frame->instanceNum);  // instanceNum -> instancenum
-    encodedFrame->set_width(frame->width);
-    encodedFrame->set_height(frame->height);
-    encodedFrame->set_quality(frame->quality);
-    encodedFrame->set_bitrate(frame->bitrate);
-    encodedFrame->set_profile(static_cast(frame->profile));  // Profile enum
-    encodedFrame->set_lossless(frame->lossless);
-    encodedFrame->set_type(static_cast(frame->type));  // FrameType enum
-    encodedFrame->set_frameoffset(frame->frameOffset);  // frameOffset -> frameoffset
-    encodedFrame->set_framesize(frame->frameSize);  // frameSize -> framesize
-    encodedFrame->set_sequencenum(frame->sequenceNum);  // sequenceNum -> sequencenum
-
-    // Set timestamps
-    proto::common::Timestamp* ts = encodedFrame->mutable_ts();
-    ts->set_sec(frame->ts.sec);
-    ts->set_nsec(frame->ts.nsec);
-
-    proto::common::Timestamp* tsDevice = encodedFrame->mutable_tsdevice();
-    tsDevice->set_sec(frame->tsDevice.sec);
-    tsDevice->set_nsec(frame->tsDevice.nsec);
-
-    // Set camera settings
-    proto::common::CameraSettings* cam = encodedFrame->mutable_cam();
-    cam->set_exposuretimeus(frame->cam.exposureTimeUs);  // exposureTimeUs -> exposuretimeus
-    cam->set_sensitivityiso(frame->cam.sensitivityIso);  // sensitivityIso -> sensitivityiso
-    cam->set_lensposition(frame->cam.lensPosition);  // lensPosition -> lensposition
-    cam->set_wbcolortemp(frame->cam.wbColorTemp);  // wbColorTemp -> wbcolortemp
-    cam->set_lenspositionraw(frame->cam.lensPositionRaw);  // lensPositionRaw -> lenspositionraw
-
-    // Set the encoded frame data
-    encodedFrame->set_data(frame->data->getData().data(), frame->data->getData().size());
-
-    proto::common::ImgTransformation* imgTransformation = encodedFrame->mutable_transformation();
-    utility::serializeImgTransformation(imgTransformation, frame->transformation);
-
-    // Return the populated protobuf message
-    return encodedFrame;
+ImgFrame EncodedFrame::getImgFrameMeta() const {
+    ImgFrame frame;
+    frame.cam = cam;
+    frame.setInstanceNum(instanceNum);
+    frame.setWidth(width);
+    frame.setHeight(height);
+    frame.setType(ImgFrame::Type::BITSTREAM);
+    frame.transformation = transformation;
+    frame.setSourceSize(transformation.getSourceSize());
+    frame.setSequenceNum(getSequenceNum());
+    frame.setTimestamp(getTimestamp());
+    frame.setTimestampDevice(getTimestampDevice());
+    return frame;
 }
 
+#ifdef DEPTHAI_ENABLE_PROTOBUF
 ProtoSerializable::SchemaPair EncodedFrame::serializeSchema() const {
-    return utility::serializeSchema(getProtoMessage(this));
+    return utility::serializeSchema(utility::getProtoMessage(this));
 }
 
-std::vector EncodedFrame::serializeProto() const {
-    return utility::serializeProto(getProtoMessage(this));
+std::vector EncodedFrame::serializeProto(bool metadataOnly) const {
+    return utility::serializeProto(utility::getProtoMessage(this, metadataOnly));
 }
 #endif
diff --git a/src/pipeline/datatype/IMUData.cpp b/src/pipeline/datatype/IMUData.cpp
index 93f54ad9a..ae88c0590 100644
--- a/src/pipeline/datatype/IMUData.cpp
+++ b/src/pipeline/datatype/IMUData.cpp
@@ -1,7 +1,22 @@
 #include "depthai/pipeline/datatype/IMUData.hpp"
 
+#if DEPTHAI_ENABLE_PROTOBUF
+    #include "depthai/schemas/IMUData.pb.h"
+    #include "depthai/schemas/common.pb.h"
+    #include "utility/ProtoSerialize.hpp"
+#endif
+
 namespace dai {
 
-span IMUData::getRecordData() const {
-    return {(uint8_t*)packets.data(), packets.size() * sizeof(IMUPacket)};
+#ifdef DEPTHAI_ENABLE_PROTOBUF
+
+ProtoSerializable::SchemaPair IMUData::serializeSchema() const {
+    return utility::serializeSchema(utility::getProtoMessage(this));
 }
+
+std::vector IMUData::serializeProto(bool) const {
+    return utility::serializeProto(utility::getProtoMessage(this));
+}
+#endif
+
 }  // namespace dai
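The serializeProto overloads above now thread a metadataOnly flag through to utility::getProtoMessage, so recorders can persist per-frame metadata without duplicating pixel data already stored in the video container. A sketch of the two modes; it assumes a build with DEPTHAI_ENABLE_PROTOBUF and that the returned buffer is a byte vector, as the hunk suggests:

#include <cstdio>

#include "depthai/pipeline/datatype/ImgFrame.hpp"

int main() {
    dai::ImgFrame frame;
    frame.setWidth(1920);
    frame.setHeight(1080);
    frame.setSequenceNum(42);

    auto full = frame.serializeProto(false);  // metadata plus (empty, here) data bytes
    auto meta = frame.serializeProto(true);   // metadata only, as RecordVideo writes it
    std::printf("full=%zu bytes, metadata-only=%zu bytes\n", full.size(), meta.size());
    return 0;
}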
diff --git a/src/pipeline/datatype/ImgAnnotations.cpp b/src/pipeline/datatype/ImgAnnotations.cpp
index 7922af4d7..a703e5161 100644
--- a/src/pipeline/datatype/ImgAnnotations.cpp
+++ b/src/pipeline/datatype/ImgAnnotations.cpp
@@ -1,94 +1,20 @@
 #include "depthai/pipeline/datatype/ImgAnnotations.hpp"
 
-#include "utility/ProtoSerializable.hpp"
-
 #ifdef DEPTHAI_ENABLE_PROTOBUF
-    #include "../../utility/ProtoSerialize.hpp"
-    #include "depthai/schemas/ImageAnnotations.pb.h"
+    #include "utility/ProtoSerializable.hpp"
+    #include "utility/ProtoSerialize.hpp"
 #endif
 
 namespace dai {
 
 #ifdef DEPTHAI_ENABLE_PROTOBUF
-std::unique_ptr getProtoMessage(const ImgAnnotations* daiAnnotations) {
-    auto imageAnnotations = std::make_unique();
-
-    imageAnnotations->set_sequencenum(daiAnnotations->sequenceNum);
-    proto::common::Timestamp* ts = imageAnnotations->mutable_ts();
-    ts->set_sec(daiAnnotations->ts.sec);
-    ts->set_nsec(daiAnnotations->ts.nsec);
-    proto::common::Timestamp* tsDevice = imageAnnotations->mutable_tsdevice();
-    tsDevice->set_sec(daiAnnotations->tsDevice.sec);
-    tsDevice->set_nsec(daiAnnotations->tsDevice.nsec);
-
-    for(const auto& annotation : daiAnnotations->annotations) {
-        proto::image_annotations::ImageAnnotation* imageAnnotation = imageAnnotations->add_annotations();
-        for(const auto& circle : annotation.circles) {
-            proto::image_annotations::CircleAnnotation* circleAnnotation = imageAnnotation->add_circles();
-            circleAnnotation->mutable_position()->set_x(circle.position.x);
-            circleAnnotation->mutable_position()->set_y(circle.position.y);
-            circleAnnotation->set_diameter(circle.diameter);
-            circleAnnotation->set_thickness(circle.thickness);
-            circleAnnotation->mutable_fillcolor()->set_r(circle.fillColor.r);
-            circleAnnotation->mutable_fillcolor()->set_g(circle.fillColor.g);
-            circleAnnotation->mutable_fillcolor()->set_b(circle.fillColor.b);
-            circleAnnotation->mutable_fillcolor()->set_a(circle.fillColor.a);
-            circleAnnotation->mutable_outlinecolor()->set_r(circle.outlineColor.r);
-            circleAnnotation->mutable_outlinecolor()->set_g(circle.outlineColor.g);
-            circleAnnotation->mutable_outlinecolor()->set_b(circle.outlineColor.b);
-            circleAnnotation->mutable_outlinecolor()->set_a(circle.outlineColor.a);
-        }
-        for(const auto& points : annotation.points) {
-            proto::image_annotations::PointsAnnotation* pointsAnnotation = imageAnnotation->add_points();
-            PointsAnnotationType type = points.type;
-            pointsAnnotation->set_type(static_cast(type));
-            for(const auto& point : points.points) {
-                proto::common::Point2f* protoPoint = pointsAnnotation->add_points();
-                protoPoint->set_x(point.x);
-                protoPoint->set_y(point.y);
-            }
-            pointsAnnotation->mutable_outlinecolor()->set_r(points.outlineColor.r);
-            pointsAnnotation->mutable_outlinecolor()->set_g(points.outlineColor.g);
-            pointsAnnotation->mutable_outlinecolor()->set_b(points.outlineColor.b);
-            pointsAnnotation->mutable_outlinecolor()->set_a(points.outlineColor.a);
-            for(const auto& color : points.outlineColors) {
-                proto::common::Color* protoColor = pointsAnnotation->add_outlinecolors();
-                protoColor->set_r(color.r);
-                protoColor->set_g(color.g);
-                protoColor->set_b(color.b);
-                protoColor->set_a(color.a);
-            }
-            pointsAnnotation->mutable_fillcolor()->set_r(points.fillColor.r);
-            pointsAnnotation->mutable_fillcolor()->set_g(points.fillColor.g);
-            pointsAnnotation->mutable_fillcolor()->set_b(points.fillColor.b);
-            pointsAnnotation->mutable_fillcolor()->set_a(points.fillColor.a);
-            pointsAnnotation->set_thickness(points.thickness);
-        }
-        for(const auto& text : annotation.texts) {
-            proto::image_annotations::TextAnnotation* textAnnotation = imageAnnotation->add_texts();
-            textAnnotation->mutable_position()->set_x(text.position.x);
-            textAnnotation->mutable_position()->set_y(text.position.y);
-            textAnnotation->set_text(text.text);
-            textAnnotation->set_fontsize(text.fontSize);
-            textAnnotation->mutable_textcolor()->set_r(text.textColor.r);
-            textAnnotation->mutable_textcolor()->set_g(text.textColor.g);
-            textAnnotation->mutable_textcolor()->set_b(text.textColor.b);
-            textAnnotation->mutable_textcolor()->set_a(text.textColor.a);
-            textAnnotation->mutable_backgroundcolor()->set_r(text.backgroundColor.r);
-            textAnnotation->mutable_backgroundcolor()->set_g(text.backgroundColor.g);
-            textAnnotation->mutable_backgroundcolor()->set_b(text.backgroundColor.b);
-            textAnnotation->mutable_backgroundcolor()->set_a(text.backgroundColor.a);
-        }
-    }
-    return imageAnnotations;
-}
 
 ProtoSerializable::SchemaPair ImgAnnotations::serializeSchema() const {
-    return utility::serializeSchema(getProtoMessage(this));
+    return utility::serializeSchema(utility::getProtoMessage(this));
 }
 
-std::vector ImgAnnotations::serializeProto() const {
-    return utility::serializeProto(getProtoMessage(this));
+std::vector ImgAnnotations::serializeProto(bool) const {
+    return utility::serializeProto(utility::getProtoMessage(this));
 }
 #endif
diff --git a/src/pipeline/datatype/ImgDetections.cpp b/src/pipeline/datatype/ImgDetections.cpp
index 565b9328a..9cc9489f4 100644
--- a/src/pipeline/datatype/ImgDetections.cpp
+++ b/src/pipeline/datatype/ImgDetections.cpp
@@ -1,46 +1,18 @@
 #include "depthai/pipeline/datatype/ImgDetections.hpp"
 
 #ifdef DEPTHAI_ENABLE_PROTOBUF
-    #include "../../utility/ProtoSerialize.hpp"
+    #include "utility/ProtoSerialize.hpp"
     #include "depthai/schemas/ImgDetections.pb.h"
 #endif
 
 namespace dai {
 
 #ifdef DEPTHAI_ENABLE_PROTOBUF
-std::unique_ptr getProtoMessage(const ImgDetections* detections) {
-    auto imgDetections = std::make_unique();
-
-    imgDetections->set_sequencenum(detections->sequenceNum);
-    proto::common::Timestamp* ts = imgDetections->mutable_ts();
-    ts->set_sec(detections->ts.sec);
-    ts->set_nsec(detections->ts.nsec);
-    proto::common::Timestamp* tsDevice = imgDetections->mutable_tsdevice();
-    tsDevice->set_sec(detections->tsDevice.sec);
-    tsDevice->set_nsec(detections->tsDevice.nsec);
-
-    for(const auto& detection : detections->detections) {
-        proto::img_detections::ImgDetection* imgDetection = imgDetections->add_detections();
-        imgDetection->set_label(detection.label);
-        imgDetection->set_confidence(detection.confidence);
-        imgDetection->set_xmin(detection.xmin);
-        imgDetection->set_ymin(detection.ymin);
-        imgDetection->set_xmax(detection.xmax);
-        imgDetection->set_ymax(detection.ymax);
-    }
-
-    proto::common::ImgTransformation* imgTransformation = imgDetections->mutable_transformation();
-    if(detections->transformation.has_value()) {
-        utility::serializeImgTransformation(imgTransformation, detections->transformation.value());
-    }
-    return imgDetections;
-}
-
 ProtoSerializable::SchemaPair ImgDetections::serializeSchema() const {
-    return utility::serializeSchema(getProtoMessage(this));
+    return utility::serializeSchema(utility::getProtoMessage(this));
 }
 
-std::vector ImgDetections::serializeProto() const {
-    return utility::serializeProto(getProtoMessage(this));
+std::vector ImgDetections::serializeProto(bool) const {
+    return utility::serializeProto(utility::getProtoMessage(this));
 }
 #endif
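These per-datatype getProtoMessage free functions are being consolidated behind a single utility::getProtoMessage template with one specialization per message type (visible in the ProtoSerialize.cpp hunks near the end of this diff). A toy, self-contained version of that dispatch pattern, with simplified stand-in names rather than the real depthai types:

#include <memory>
#include <string>

struct ProtoMessage {  // stand-in for a generated protobuf type
    std::string typeName;
};

template <typename T>
std::unique_ptr<ProtoMessage> getProtoMessage(const T* msg, bool metadataOnly = false);

struct ImgDetections {};

template <>
std::unique_ptr<ProtoMessage> getProtoMessage(const ImgDetections*, bool) {
    auto m = std::make_unique<ProtoMessage>();
    m->typeName = "ImgDetections";
    return m;
}

int main() {
    ImgDetections d;
    auto m = getProtoMessage(&d);  // resolves to the ImgDetections specialization
    return m->typeName == "ImgDetections" ? 0 : 1;
}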
diff --git a/src/pipeline/datatype/ImgFrame.cpp b/src/pipeline/datatype/ImgFrame.cpp
index d2e2bb760..24dbc3604 100644
--- a/src/pipeline/datatype/ImgFrame.cpp
+++ b/src/pipeline/datatype/ImgFrame.cpp
@@ -5,7 +5,7 @@
 #include "depthai/common/RotatedRect.hpp"
 #include "depthai/utility/SharedMemory.hpp"
 #ifdef DEPTHAI_ENABLE_PROTOBUF
-    #include "../../utility/ProtoSerialize.hpp"
+    #include "utility/ProtoSerialize.hpp"
     #include "depthai/schemas/ImgFrame.pb.h"
 #endif
 namespace dai {
@@ -293,62 +293,12 @@ Rect ImgFrame::remapRectBetweenFrames(const Rect& originRect, const ImgFrame& or
 }
 
 #ifdef DEPTHAI_ENABLE_PROTOBUF
-std::unique_ptr getProtoMessage(const ImgFrame* frame) {
-    // create and populate ImgFrame protobuf message
-    auto imgFrame = std::make_unique();
-    proto::common::Timestamp* ts = imgFrame->mutable_ts();
-    ts->set_sec(frame->ts.sec);
-    ts->set_nsec(frame->ts.nsec);
-    proto::common::Timestamp* tsDevice = imgFrame->mutable_tsdevice();
-    tsDevice->set_sec(frame->tsDevice.sec);
-    tsDevice->set_nsec(frame->tsDevice.nsec);
-
-    imgFrame->set_sequencenum(frame->sequenceNum);
-
-    proto::img_frame::Specs* fb = imgFrame->mutable_fb();
-    fb->set_type(static_cast(frame->fb.type));
-    fb->set_width(frame->fb.width);
-    fb->set_height(frame->fb.height);
-    fb->set_stride(frame->fb.stride);
-    fb->set_bytespp(frame->fb.bytesPP);
-    fb->set_p1offset(frame->fb.p1Offset);
-    fb->set_p2offset(frame->fb.p2Offset);
-    fb->set_p3offset(frame->fb.p3Offset);
-
-    proto::img_frame::Specs* sourceFb = imgFrame->mutable_sourcefb();
-    sourceFb->set_type(static_cast(frame->sourceFb.type));
-    sourceFb->set_width(frame->sourceFb.width);
-    sourceFb->set_height(frame->sourceFb.height);
-    sourceFb->set_stride(frame->sourceFb.stride);
-    sourceFb->set_bytespp(frame->sourceFb.bytesPP);
-    sourceFb->set_p1offset(frame->sourceFb.p1Offset);
-    sourceFb->set_p2offset(frame->sourceFb.p2Offset);
-    sourceFb->set_p3offset(frame->sourceFb.p3Offset);
-
-    proto::common::CameraSettings* cam = imgFrame->mutable_cam();
-    cam->set_exposuretimeus(frame->cam.exposureTimeUs);
-    cam->set_sensitivityiso(frame->cam.sensitivityIso);
-    cam->set_lensposition(frame->cam.lensPosition);
-    cam->set_wbcolortemp(frame->cam.wbColorTemp);
-    cam->set_lenspositionraw(frame->cam.lensPositionRaw);
-
-    imgFrame->set_instancenum(frame->instanceNum);
-
-    imgFrame->set_category(frame->category);
-
-    proto::common::ImgTransformation* imgTransformation = imgFrame->mutable_transformation();
-    utility::serializeImgTransformation(imgTransformation, frame->transformation);
-
-    imgFrame->set_data(frame->data->getData().data(), frame->data->getData().size());
-    return imgFrame;
-}
-
 ProtoSerializable::SchemaPair ImgFrame::serializeSchema() const {
-    return utility::serializeSchema(getProtoMessage(this));
+    return utility::serializeSchema(utility::getProtoMessage(this));
 }
 
-std::vector ImgFrame::serializeProto() const {
-    return utility::serializeProto(getProtoMessage(this));
+std::vector ImgFrame::serializeProto(bool metadataOnly) const {
+    return utility::serializeProto(utility::getProtoMessage(this, metadataOnly));
 }
 #endif
 }  // namespace dai
diff --git a/src/pipeline/datatype/PointCloudData.cpp b/src/pipeline/datatype/PointCloudData.cpp
index 5f5046630..cf286166c 100644
--- a/src/pipeline/datatype/PointCloudData.cpp
+++ b/src/pipeline/datatype/PointCloudData.cpp
@@ -2,7 +2,7 @@
 #include "depthai/common/Point3f.hpp"
 
 #ifdef DEPTHAI_ENABLE_PROTOBUF
-    #include "../../utility/ProtoSerialize.hpp"
+    #include "utility/ProtoSerialize.hpp"
     #include "depthai/schemas/PointCloudData.pb.h"
 #endif
 namespace dai {
@@ -129,41 +129,12 @@ PointCloudData& PointCloudData::setColor(bool val) {
 }
 
 #ifdef DEPTHAI_ENABLE_PROTOBUF
-std::unique_ptr getProtoMessage(const PointCloudData* daiCloudData) {
-    auto pointCloudData = std::make_unique();
-
-    auto timestamp = pointCloudData->mutable_ts();
-    timestamp->set_sec(daiCloudData->ts.sec);
-    timestamp->set_nsec(daiCloudData->ts.nsec);
-
-    auto timestampDevice = pointCloudData->mutable_tsdevice();
-    timestampDevice->set_sec(daiCloudData->tsDevice.sec);
-    timestampDevice->set_nsec(daiCloudData->tsDevice.nsec);
-
-    pointCloudData->set_sequencenum(daiCloudData->sequenceNum);
-    pointCloudData->set_width(daiCloudData->getWidth());
-    pointCloudData->set_height(daiCloudData->getHeight());
-    pointCloudData->set_instancenum(daiCloudData->getInstanceNum());
-    pointCloudData->set_minx(daiCloudData->getMinX());
-    pointCloudData->set_miny(daiCloudData->getMinY());
-    pointCloudData->set_minz(daiCloudData->getMinZ());
-    pointCloudData->set_maxx(daiCloudData->getMaxX());
-    pointCloudData->set_maxy(daiCloudData->getMaxY());
-    pointCloudData->set_maxz(daiCloudData->getMaxZ());
-    pointCloudData->set_sparse(daiCloudData->isSparse());
-    pointCloudData->set_color(daiCloudData->isColor());
-
-    pointCloudData->set_data(daiCloudData->data->getData().data(), daiCloudData->data->getSize());
-
-    return pointCloudData;
-}
-
-std::vector PointCloudData::serializeProto() const {
-    return utility::serializeProto(getProtoMessage(this));
+std::vector PointCloudData::serializeProto(bool metadataOnly) const {
+    return utility::serializeProto(utility::getProtoMessage(this, metadataOnly));
 }
 
 ProtoSerializable::SchemaPair PointCloudData::serializeSchema() const {
-    return utility::serializeSchema(getProtoMessage(this));
+    return utility::serializeSchema(utility::getProtoMessage(this));
 }
 #endif
diff --git a/src/pipeline/datatype/SpatialImgDetections.cpp b/src/pipeline/datatype/SpatialImgDetections.cpp
index 9253b49a1..84a69209b 100644
--- a/src/pipeline/datatype/SpatialImgDetections.cpp
+++ b/src/pipeline/datatype/SpatialImgDetections.cpp
@@ -1,78 +1,17 @@
 #include "depthai/pipeline/datatype/SpatialImgDetections.hpp"
 
 #ifdef DEPTHAI_ENABLE_PROTOBUF
-    #include "../../utility/ProtoSerialize.hpp"
+    #include "utility/ProtoSerialize.hpp"
     #include "depthai/schemas/SpatialImgDetections.pb.h"
 #endif
 
 namespace dai {
 
 #ifdef DEPTHAI_ENABLE_PROTOBUF
-std::unique_ptr getProtoMessage(const dai::SpatialImgDetections* daiSpatialImgDetections) {
-    // create and populate SpatialImgDetections protobuf message
-    auto spatialImgDetections = std::make_unique();
-    spatialImgDetections->set_sequencenum(daiSpatialImgDetections->sequenceNum);
-
-    proto::common::Timestamp* ts = spatialImgDetections->mutable_ts();
-    ts->set_sec(daiSpatialImgDetections->ts.sec);
-    ts->set_nsec(daiSpatialImgDetections->ts.nsec);
-
-    proto::common::Timestamp* tsDevice = spatialImgDetections->mutable_tsdevice();
-    tsDevice->set_sec(daiSpatialImgDetections->tsDevice.sec);
-    tsDevice->set_nsec(daiSpatialImgDetections->tsDevice.nsec);
-
-    for(const auto& detection : daiSpatialImgDetections->detections) {
-        proto::spatial_img_detections::SpatialImgDetection* spatialImgDetection = spatialImgDetections->add_detections();
-
-        // populate SpatialImgDetection.ImgDetection from struct inheritance
-        proto::img_detections::ImgDetection* imgDetection = spatialImgDetection->mutable_detection();
-        imgDetection->set_label(detection.label);
-        imgDetection->set_confidence(detection.confidence);
-        imgDetection->set_xmin(detection.xmin);
-        imgDetection->set_ymin(detection.ymin);
-        imgDetection->set_xmax(detection.xmax);
-        imgDetection->set_ymax(detection.ymax);
-
-        // populate SpatialImgDetection.Point3f
-        proto::spatial_img_detections::Point3f* spatialCoordinates = spatialImgDetection->mutable_spatialcoordinates();
-        spatialCoordinates->set_x(detection.spatialCoordinates.x);
-        spatialCoordinates->set_y(detection.spatialCoordinates.y);
-        spatialCoordinates->set_z(detection.spatialCoordinates.z);
-
-        // populate SpatialImgDetection.SpatialLocationCalculatorConfigData
-        proto::spatial_img_detections::SpatialLocationCalculatorConfigData* boundingBoxMapping = spatialImgDetection->mutable_boundingboxmapping();
-
-        // populate SpatialImgDetection.SpatialLocationCalculatorConfigData.Rect
-        proto::spatial_img_detections::Rect* roi = boundingBoxMapping->mutable_roi();
-        roi->set_x(detection.boundingBoxMapping.roi.x);
-        roi->set_y(detection.boundingBoxMapping.roi.y);
-        roi->set_width(detection.boundingBoxMapping.roi.width);
-        roi->set_height(detection.boundingBoxMapping.roi.height);
-
-        // populate SpatialImgDetection.SpatialLocationCalculatorConfigData.SpatialLocationCalculatorConfigThresholds
-        proto::spatial_img_detections::SpatialLocationCalculatorConfigThresholds* depthTresholds = boundingBoxMapping->mutable_depththresholds();
-        depthTresholds->set_lowerthreshold(detection.boundingBoxMapping.depthThresholds.lowerThreshold);
-        depthTresholds->set_upperthreshold(detection.boundingBoxMapping.depthThresholds.upperThreshold);
-
-        // populate SpatialImgDetection.SpatialLocationCalculatorConfigData.SpatialLocationCalculatorAlgorithm
-        boundingBoxMapping->set_calculationalgorithm(
-            static_cast(detection.boundingBoxMapping.calculationAlgorithm));
-
-        // populate SpatialImgDetection.SpatialLocationCalculatorConfigData.stepSize
-        boundingBoxMapping->set_stepsize(detection.boundingBoxMapping.stepSize);
-    }
-    proto::common::ImgTransformation* imgTransformation = spatialImgDetections->mutable_transformation();
-    if(daiSpatialImgDetections->transformation.has_value()) {
-        utility::serializeImgTransformation(imgTransformation, daiSpatialImgDetections->transformation.value());
-    }
-
-    return spatialImgDetections;
-}
-
-std::vector SpatialImgDetections::serializeProto() const {
-    return utility::serializeProto(getProtoMessage(this));
+std::vector SpatialImgDetections::serializeProto(bool) const {
+    return utility::serializeProto(utility::getProtoMessage(this));
 }
 
 ProtoSerializable::SchemaPair SpatialImgDetections::serializeSchema() const {
-    return utility::serializeSchema(getProtoMessage(this));
+    return utility::serializeSchema(utility::getProtoMessage(this));
 }
 #endif
 }  // namespace dai
diff --git a/src/pipeline/node/Camera.cpp b/src/pipeline/node/Camera.cpp
index 40d70963e..e5bb2a44f 100644
--- a/src/pipeline/node/Camera.cpp
+++ b/src/pipeline/node/Camera.cpp
@@ -3,12 +3,15 @@
 #include
 #include
 #include
+#include
 
 // libraries
 #include
 
 // depthai internal
+#include "depthai/common/CameraBoardSocket.hpp"
 #include "utility/ErrorMacros.hpp"
+#include "utility/RecordReplayImpl.hpp"
 
 namespace dai {
 namespace node {
@@ -103,6 +106,18 @@ std::shared_ptr Camera::build(CameraBoardSocket boardSocket) {
     return std::static_pointer_cast(shared_from_this());
 }
 
+std::shared_ptr Camera::build(CameraBoardSocket boardSocket, ReplayVideo& replay) {
+    auto cam = build(boardSocket);
+    cam->setMockIsp(replay);
+    return cam;
+}
+
+std::shared_ptr Camera::build(ReplayVideo& replay) {
+    auto cam = build(CameraBoardSocket::AUTO);
+    cam->setMockIsp(replay);
+    return cam;
+}
+
 Camera::Properties& Camera::getProperties() {
     properties.initialControl = initialControl;
     return properties;
@@ -163,6 +178,18 @@
     return pimpl->requestOutput(*this, capability, onHost);
 }
 
+Camera& Camera::setMockIsp(ReplayVideo& replay) {
+    if(!replay.getReplayVideoFile().empty()) {
+        const auto& [width, height] = utility::getVideoSize(replay.getReplayVideoFile().string());
+        properties.mockIspWidth = width;
+        properties.mockIspHeight = height;
+        replay.out.link(mockIsp);
+    } else {
+        throw std::runtime_error("ReplayVideo video path not set");
+    }
+    return *this;
+}
+
 void Camera::buildStage1() {
     return pimpl->buildStage1(*this);
 }
@@ -180,6 +207,55 @@ NodeRecordParams Camera::getNodeRecordParams() const {
     params.name = "Camera" + toString(properties.boardSocket);
     return params;
 }
+Camera::Input& Camera::getReplayInput() {
+    return mockIsp;
+}
+float Camera::getMaxRequestedFps() const {
+    float maxFps = 0;
+    for(const auto& outputRequest : pimpl->outputRequests) {
+        if(outputRequest.capability.fps.value) {
+            if(const auto* fps = std::get_if(&(*outputRequest.capability.fps.value))) {
+                maxFps = std::max(maxFps, *fps);
+            } else if(const auto* fps = std::get_if>(&(*outputRequest.capability.fps.value))) {
+                maxFps = std::max(maxFps, std::get<1>(*fps));
+            } else if(const auto* fps = std::get_if>(&(*outputRequest.capability.fps.value))) {
+                DAI_CHECK(fps->size() > 0, "When passing a vector to ImgFrameCapability->fps, please pass a non empty vector!");
+                maxFps = std::max(maxFps, (*fps)[0]);
+            } else {
+                throw std::runtime_error("Unsupported fps value");
+            }
+        }
+    }
+    return maxFps == 0 ? 30 : maxFps;
+}
+uint32_t Camera::getMaxRequestedWidth() const {
+    uint32_t width = 0;
+    for(const auto& outputRequest : pimpl->outputRequests) {
+        auto& spec = outputRequest.capability;
+        if(spec.size.value) {
+            if(const auto* size = std::get_if>(&(*spec.size.value))) {
+                width = std::max(width, size->first);
+            } else {
+                DAI_CHECK_IN(false);
+            }
+        }
+    }
+    return width == 0 ? maxWidth : width;
+}
+uint32_t Camera::getMaxRequestedHeight() const {
+    uint32_t height = 0;
+    for(const auto& outputRequest : pimpl->outputRequests) {
+        auto& spec = outputRequest.capability;
+        if(spec.size.value) {
+            if(const auto* size = std::get_if>(&(*spec.size.value))) {
+                height = std::max(height, size->second);
+            } else {
+                DAI_CHECK_IN(false);
+            }
+        }
+    }
+    return height == 0 ? maxHeight : height;
+}
 
 /*
 Camera::Output& Camera::getRecordOutput() {
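With the overloads above, a Camera can be fed from a recorded file through its new mockIsp input instead of a live sensor. A minimal sketch; the queue helpers are assumed from the v3 API used by the examples in this PR:

#include "depthai/depthai.hpp"
#include "depthai/pipeline/node/host/Replay.hpp"

int main() {
    dai::Pipeline pipeline;

    auto replay = pipeline.create<dai::node::ReplayVideo>();
    replay->setReplayVideoFile("recording.mp4");  // example path

    // build(ReplayVideo&) calls setMockIsp(), which probes the video size and
    // links replay->out into the camera's mockIsp input.
    auto cam = pipeline.create<dai::node::Camera>()->build(*replay);
    auto q = cam->requestOutput({1280, 720})->createOutputQueue();

    pipeline.start();
    auto frame = q->get<dai::ImgFrame>();
    pipeline.stop();
    return frame != nullptr ? 0 : 1;
}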
diff --git a/src/pipeline/node/host/Record.cpp b/src/pipeline/node/host/Record.cpp
index 26e628355..385b994c3 100644
--- a/src/pipeline/node/host/Record.cpp
+++ b/src/pipeline/node/host/Record.cpp
@@ -5,29 +5,37 @@
 #include
 
 #include "depthai/config/config.hpp"
+#include "depthai/pipeline/datatype/DatatypeEnum.hpp"
 #include "depthai/pipeline/datatype/EncodedFrame.hpp"
 #include "depthai/pipeline/datatype/IMUData.hpp"
 #include "depthai/pipeline/datatype/ImgFrame.hpp"
+#include "depthai/pipeline/datatype/PointCloudData.hpp"
 #include "depthai/properties/VideoEncoderProperties.hpp"
-#include "depthai/utility/RecordReplaySchema.hpp"
 #include "depthai/utility/span.hpp"
 #include "utility/RecordReplayImpl.hpp"
 
+#ifdef DEPTHAI_ENABLE_PROTOBUF
+    #include "depthai/schemas/EncodedFrame.pb.h"
+    #include "depthai/schemas/IMUData.pb.h"
+    #include "depthai/schemas/ImgFrame.pb.h"
+    #include "depthai/schemas/PointCloudData.pb.h"
+    #include "utility/ProtoSerializable.hpp"
+#endif
+
 namespace dai {
 namespace node {
 
-enum class StreamType { EncodedVideo, RawVideo, Imu, Byte, Unknown };
-
 using VideoCodec = dai::utility::VideoRecorder::VideoCodec;
 
 void RecordVideo::run() {
+#ifdef DEPTHAI_ENABLE_PROTOBUF
     std::unique_ptr videoRecorder;
 
-#ifdef DEPTHAI_HAVE_OPENCV_SUPPORT
+    #ifdef DEPTHAI_HAVE_OPENCV_SUPPORT
     videoRecorder = std::make_unique();
-#else
+    #else
     throw std::runtime_error("RecordVideo node requires OpenCV support");
-#endif
+    #endif
 
     utility::ByteRecorder byteRecorder;
 
@@ -36,7 +44,7 @@
     }
     bool recordMetadata = !recordMetadataFile.empty();
 
-    StreamType streamType = StreamType::Unknown;
+    DatatypeEnum streamType = DatatypeEnum::ADatatype;
     unsigned int width = 0;
     unsigned int height = 0;
     unsigned int fps = 0;
@@ -46,41 +54,43 @@
     while(isRunning()) {
         auto msg = input.get();
         if(msg == nullptr) continue;
-        if(streamType == StreamType::Unknown) {
+        if(streamType == DatatypeEnum::ADatatype) {
             if(std::dynamic_pointer_cast(msg) != nullptr) {
                 auto imgFrame = std::dynamic_pointer_cast(msg);
                 if(imgFrame->getType() == dai::ImgFrame::Type::BITSTREAM)
                     throw std::runtime_error(
                         "RecordVideo node does not support encoded ImgFrame messages. Use the `out` output of VideoEncoder to record encoded frames.");
-                streamType = StreamType::RawVideo;
+                streamType = DatatypeEnum::ImgFrame;
                 width = imgFrame->getWidth();
                 height = imgFrame->getHeight();
-                if(recordMetadata) byteRecorder.init(recordMetadataFile.string(), compressionLevel, utility::RecordType::Video);
+                if(recordMetadata) byteRecorder.init(recordMetadataFile.string(), compressionLevel, "video");
             } else if(std::dynamic_pointer_cast(msg) != nullptr) {
                 auto encFrame = std::dynamic_pointer_cast(msg);
                 if(encFrame->getProfile() == EncodedFrame::Profile::HEVC) {
                     throw std::runtime_error("RecordVideo node does not support H265 encoding");
                 }
-                streamType = StreamType::EncodedVideo;
+                streamType = DatatypeEnum::EncodedFrame;
                 width = encFrame->getWidth();
                 height = encFrame->getHeight();
                 if(logger) logger->trace("RecordVideo node detected {}x{} resolution", width, height);
-                if(recordMetadata) byteRecorder.init(recordMetadataFile.string(), compressionLevel, utility::RecordType::Video);
+                if(recordMetadata) byteRecorder.init(recordMetadataFile.string(), compressionLevel, "video");
             } else {
                 throw std::runtime_error("RecordVideo can only record video streams.");
             }
             if(logger)
                 logger->trace("RecordVideo node detected stream type {}",
-                              streamType == StreamType::RawVideo ? "RawVideo" : streamType == StreamType::EncodedVideo ? "EncodedVideo" : "Byte");
+                              streamType == DatatypeEnum::ImgFrame       ? "RawVideo"
+                              : streamType == DatatypeEnum::EncodedFrame ? "EncodedVideo"
+                                                                         : "Byte");
         }
-        if(streamType == StreamType::RawVideo || streamType == StreamType::EncodedVideo) {
+        if(streamType == DatatypeEnum::ImgFrame || streamType == DatatypeEnum::EncodedFrame) {
             if(i == 0)
                 start = msg->getTimestampDevice();
             else if(i == fpsInitLength - 1) {
                 end = msg->getTimestampDevice();
                 fps = roundf((fpsInitLength * 1e6f) / (float)std::chrono::duration_cast(end - start).count());
                 if(logger) logger->trace("RecordVideo node detected {} fps", fps);
-                if(streamType == StreamType::EncodedVideo) {
+                if(streamType == DatatypeEnum::EncodedFrame) {
                     auto encFrame = std::dynamic_pointer_cast(msg);
                     videoRecorder->init(recordVideoFile.string(),
                                         width,
@@ -93,8 +103,8 @@
         }
         if(i >= fpsInitLength - 1) {
             auto data = msg->getData();
-            if(streamType == StreamType::RawVideo) {
-#ifdef DEPTHAI_HAVE_OPENCV_SUPPORT
+            if(streamType == DatatypeEnum::ImgFrame) {
+    #ifdef DEPTHAI_HAVE_OPENCV_SUPPORT
                 auto imgFrame = std::dynamic_pointer_cast(msg);
                 auto frame = imgFrame->getCvFrame();
                 bool isGrayscale = imgFrame->getType() == ImgFrame::Type::GRAY8 || imgFrame->getType() == ImgFrame::Type::GRAYF16
@@ -106,36 +116,17 @@
                 span cvData(frame.data, frame.total() * frame.elemSize());
                 videoRecorder->write(cvData, frame.step);
                 if(recordMetadata) {
-                    utility::VideoRecordSchema record;
-                    record.timestamp.set(std::chrono::duration_cast(imgFrame->getTimestampDevice().time_since_epoch()));
-                    record.sequenceNumber = imgFrame->getSequenceNum();
-                    record.instanceNumber = imgFrame->getInstanceNum();
-                    record.width = imgFrame->getWidth();
-                    record.height = imgFrame->getHeight();
-                    record.cameraSettings.exposure = imgFrame->cam.exposureTimeUs;
-                    record.cameraSettings.sensitivity = imgFrame->cam.sensitivityIso;
-                    record.cameraSettings.wbColorTemp = imgFrame->cam.wbColorTemp;
-                    record.cameraSettings.lensPosition = imgFrame->cam.lensPosition;
-                    byteRecorder.write(record);
+                    byteRecorder.write(imgFrame->serializeProto(true));
                 }
-#else
+    #else
                 throw std::runtime_error("RecordVideo node requires OpenCV support");
-#endif
+    #endif
             } else {
                 videoRecorder->write(data);
                 if(recordMetadata) {
                     auto encFrame = std::dynamic_pointer_cast(msg);
-                    utility::VideoRecordSchema record;
-                    record.timestamp.set(std::chrono::duration_cast(encFrame->getTimestampDevice().time_since_epoch()));
-                    record.sequenceNumber = encFrame->getSequenceNum();
-                    record.instanceNumber = encFrame->getInstanceNum();
-                    record.width = encFrame->getWidth();
-                    record.height = encFrame->getHeight();
-                    record.cameraSettings.exposure = encFrame->cam.exposureTimeUs;
-                    record.cameraSettings.sensitivity = encFrame->cam.sensitivityIso;
-                    record.cameraSettings.wbColorTemp = encFrame->cam.wbColorTemp;
-                    record.cameraSettings.lensPosition = encFrame->cam.lensPosition;
-                    byteRecorder.write(record);
+                    auto imgFrame = encFrame->getImgFrameMeta();
+                    byteRecorder.write(imgFrame.serializeProto(true));
                 }
             }
         }
@@ -146,74 +137,46 @@
     }
 
     videoRecorder->close();
+#else
+    throw std::runtime_error("RecordVideo node requires protobuf support");
+#endif
 }
 
 void RecordMetadataOnly::run() {
+#ifdef DEPTHAI_ENABLE_PROTOBUF
     utility::ByteRecorder byteRecorder;
-    StreamType streamType = StreamType::Unknown;
+    DatatypeEnum streamType = DatatypeEnum::ADatatype;
     while(isRunning()) {
         auto msg = input.get();
         if(msg == nullptr) continue;
-        if(streamType == StreamType::Unknown) {
-            if(std::dynamic_pointer_cast(msg) != nullptr) {
-                streamType = StreamType::Imu;
-                byteRecorder.init(recordFile.string(), compressionLevel, utility::RecordType::Imu);
+        if(streamType == DatatypeEnum::ADatatype) {
+            if(std::dynamic_pointer_cast(msg) != nullptr) {
+                streamType = DatatypeEnum::ImgFrame;
+                byteRecorder.init(recordFile.string(), compressionLevel, "img_frame");
+            } else if(std::dynamic_pointer_cast(msg) != nullptr) {
+                streamType = DatatypeEnum::IMUData;
+                byteRecorder.init(recordFile.string(), compressionLevel, "imu");
+            } else if(std::dynamic_pointer_cast(msg) != nullptr) {
+                streamType = DatatypeEnum::EncodedFrame;
+                byteRecorder.init(recordFile.string(), compressionLevel, "encoded_frame");
+            } else if(std::dynamic_pointer_cast(msg) != nullptr) {
+                streamType = DatatypeEnum::PointCloudData;
+                byteRecorder.init(recordFile.string(), compressionLevel, "point_cloud");
             } else {
                 throw std::runtime_error("RecordMetadataOnly node does not support this type of message");
             }
-            if(logger)
-                logger->trace("RecordMetadataOnly node detected stream type {}",
-                              streamType == StreamType::RawVideo ? "RawVideo" : streamType == StreamType::EncodedVideo ? "EncodedVideo" : "Byte");
"EncodedVideo" : "Byte"); + if(logger) logger->trace("RecordMetadataOnly node detected stream type {}", (int)streamType); } - if(streamType == StreamType::Imu) { - auto imuData = std::dynamic_pointer_cast(msg); - utility::IMURecordSchema record; - record.packets.reserve(imuData->packets.size()); - for(const auto& packet : imuData->packets) { - utility::IMUPacketSchema packetSchema; - // Acceleration - packetSchema.acceleration.timestamp.set( - std::chrono::duration_cast(packet.acceleroMeter.getTimestampDevice().time_since_epoch())); - packetSchema.acceleration.sequenceNumber = packet.acceleroMeter.sequence; - packetSchema.acceleration.accuracy = (utility::IMUReportSchema::Accuracy)packet.gyroscope.accuracy; - packetSchema.acceleration.x = packet.acceleroMeter.x; - packetSchema.acceleration.y = packet.acceleroMeter.y; - packetSchema.acceleration.z = packet.acceleroMeter.z; - // Orientation - packetSchema.orientation.timestamp.set( - std::chrono::duration_cast(packet.gyroscope.getTimestampDevice().time_since_epoch())); - packetSchema.orientation.sequenceNumber = packet.gyroscope.sequence; - packetSchema.orientation.accuracy = (utility::IMUReportSchema::Accuracy)packet.gyroscope.accuracy; - packetSchema.orientation.x = packet.gyroscope.x; - packetSchema.orientation.y = packet.gyroscope.y; - packetSchema.orientation.z = packet.gyroscope.z; - // Magnetic field - packetSchema.magneticField.timestamp.set( - std::chrono::duration_cast(packet.magneticField.getTimestampDevice().time_since_epoch())); - packetSchema.magneticField.sequenceNumber = packet.magneticField.sequence; - packetSchema.magneticField.accuracy = (utility::IMUReportSchema::Accuracy)packet.magneticField.accuracy; - packetSchema.magneticField.x = packet.magneticField.x; - packetSchema.magneticField.y = packet.magneticField.y; - packetSchema.magneticField.z = packet.magneticField.z; - // Rotation with accuracy - packetSchema.rotationVector.timestamp.set( - std::chrono::duration_cast(packet.rotationVector.getTimestampDevice().time_since_epoch())); - packetSchema.rotationVector.sequenceNumber = packet.rotationVector.sequence; - packetSchema.rotationVector.accuracy = (utility::IMUReportSchema::Accuracy)packet.rotationVector.accuracy; - packetSchema.rotationVector.i = packet.rotationVector.i; - packetSchema.rotationVector.j = packet.rotationVector.j; - packetSchema.rotationVector.k = packet.rotationVector.k; - packetSchema.rotationVector.real = packet.rotationVector.real; - packetSchema.rotationVector.rotationAccuracy = packet.rotationVector.rotationVectorAccuracy; - - record.packets.push_back(packetSchema); - } - byteRecorder.write(record); - } else { + auto serializable = std::dynamic_pointer_cast(msg); + if(serializable == nullptr) { throw std::runtime_error("RecordMetadataOnly unsupported message type"); } + byteRecorder.write(serializable->serializeProto()); } +#else + throw std::runtime_error("RecordMetadataOnly node requires protobuf support"); +#endif } std::filesystem::path RecordVideo::getRecordMetadataFile() const { diff --git a/src/pipeline/node/host/Replay.cpp b/src/pipeline/node/host/Replay.cpp index 5248bad4f..671841654 100644 --- a/src/pipeline/node/host/Replay.cpp +++ b/src/pipeline/node/host/Replay.cpp @@ -1,6 +1,9 @@ -#define _USE_MATH_DEFINES -#include "depthai/pipeline/node/host/Replay.hpp" +#include +#include "depthai/pipeline/datatype/DatatypeEnum.hpp" +#include "depthai/pipeline/datatype/EncodedFrame.hpp" +#include "depthai/pipeline/datatype/PointCloudData.hpp" +#define _USE_MATH_DEFINES #include #include #include 
diff --git a/src/pipeline/node/host/Replay.cpp b/src/pipeline/node/host/Replay.cpp
index 5248bad4f..671841654 100644
--- a/src/pipeline/node/host/Replay.cpp
+++ b/src/pipeline/node/host/Replay.cpp
@@ -1,6 +1,9 @@
-#define _USE_MATH_DEFINES
-#include "depthai/pipeline/node/host/Replay.hpp"
+#include
 
+#include "depthai/pipeline/datatype/DatatypeEnum.hpp"
+#include "depthai/pipeline/datatype/EncodedFrame.hpp"
+#include "depthai/pipeline/datatype/PointCloudData.hpp"
+#define _USE_MATH_DEFINES
 #include
 #include
 #include
@@ -9,46 +12,159 @@
 #include "depthai/pipeline/datatype/IMUData.hpp"
 #include "depthai/pipeline/datatype/ImgFrame.hpp"
-#include "depthai/utility/RecordReplaySchema.hpp"
+#include "depthai/pipeline/node/host/Replay.hpp"
 #include "utility/RecordReplayImpl.hpp"
 
+#ifdef DEPTHAI_ENABLE_PROTOBUF
+    #include
+
+    #include "depthai/schemas/EncodedFrame.pb.h"
+    #include "depthai/schemas/IMUData.pb.h"
+    #include "depthai/schemas/ImgFrame.pb.h"
+    #include "depthai/schemas/PointCloudData.pb.h"
+    #include "utility/ProtoSerialize.hpp"
+#endif
+
 namespace dai {
 namespace node {
 
+#ifdef DEPTHAI_ENABLE_PROTOBUF
 // Video Message
-std::shared_ptr getVideoMessage(const nlohmann::json& metadata, ImgFrame::Type outFrameType, std::vector& frame) {
-    // TODO(asahtik): Handle versions
-    utility::VideoRecordSchema recordSchema = metadata;
-    ImgFrame imgFrame = recordSchema.getMessage();
-
-    assert(frame.size() == recordSchema.width * recordSchema.height * 3);
-    cv::Mat img(recordSchema.height, recordSchema.width, CV_8UC3, frame.data());
-    imgFrame.setCvFrame(img, outFrameType);
-    imgFrame.sourceFb = imgFrame.fb;
-    return std::dynamic_pointer_cast(std::make_shared(imgFrame));
+inline std::shared_ptr getVideoMessage(const proto::img_frame::ImgFrame& metadata, ImgFrame::Type outFrameType, std::vector& frame) {
+    auto imgFrame = std::make_shared();
+    utility::setProtoMessage(*imgFrame, &metadata, true);
+
+    assert(frame.size() == imgFrame->getWidth() * imgFrame->getHeight() * 3);
+    cv::Mat img(imgFrame->getHeight(), imgFrame->getWidth(), CV_8UC3, frame.data());
+    imgFrame->setCvFrame(img, outFrameType);
+    return std::dynamic_pointer_cast(imgFrame);
+}
+
+inline std::shared_ptr getMessage(const std::shared_ptr metadata, DatatypeEnum datatype) {
+    switch(datatype) {
+        case DatatypeEnum::ImgFrame: {
+            auto imgFrame = std::make_shared();
+            utility::setProtoMessage(*imgFrame, metadata.get(), false);
+            return imgFrame;
+        }
+        case DatatypeEnum::EncodedFrame: {
+            auto encFrame = std::make_shared();
+            utility::setProtoMessage(*encFrame, metadata.get(), false);
+            return encFrame;
+        }
+        case DatatypeEnum::IMUData: {
+            auto imuData = std::make_shared();
+            utility::setProtoMessage(*imuData, metadata.get(), false);
+            return imuData;
+        }
+        case DatatypeEnum::PointCloudData: {
+            auto pclData = std::make_shared();
+            utility::setProtoMessage(*pclData, metadata.get(), false);
+            return pclData;
+        }
+        case DatatypeEnum::ADatatype:
+        case DatatypeEnum::Buffer:
+        case DatatypeEnum::NNData:
+        case DatatypeEnum::ImageManipConfig:
+        case DatatypeEnum::ImageManipConfigV2:
+        case DatatypeEnum::CameraControl:
+        case DatatypeEnum::ImgDetections:
+        case DatatypeEnum::SpatialImgDetections:
+        case DatatypeEnum::SystemInformation:
+        case DatatypeEnum::SystemInformationS3:
+        case DatatypeEnum::SpatialLocationCalculatorConfig:
+        case DatatypeEnum::SpatialLocationCalculatorData:
+        case DatatypeEnum::EdgeDetectorConfig:
+        case DatatypeEnum::AprilTagConfig:
+        case DatatypeEnum::AprilTags:
+        case DatatypeEnum::Tracklets:
+        case DatatypeEnum::StereoDepthConfig:
+        case DatatypeEnum::FeatureTrackerConfig:
+        case DatatypeEnum::ThermalConfig:
+        case DatatypeEnum::ToFConfig:
+        case DatatypeEnum::TrackedFeatures:
+        case DatatypeEnum::BenchmarkReport:
+        case DatatypeEnum::MessageGroup:
+        case DatatypeEnum::TransformData:
+        case DatatypeEnum::PointCloudConfig:
+        case DatatypeEnum::ImageAlignConfig:
+        case DatatypeEnum::ImgAnnotations:
+            break;
+    }
+    throw std::runtime_error("Cannot replay message type: " + std::to_string((int)datatype));
+}
-std::shared_ptr getMessage(const nlohmann::json& metadata, utility::RecordType type) {
-    switch(type) {
-        case utility::RecordType::Other:
-        case utility::RecordType::Video:
-            throw std::runtime_error("Invalid message type");
+inline std::shared_ptr getProtoMessage(utility::BytePlayer& bytePlayer, DatatypeEnum datatype) {
+    switch(datatype) {
+        case DatatypeEnum::ImgFrame: {
+            auto msg = bytePlayer.next();
+            if(msg.has_value()) {
+                return std::make_shared(msg.value());
+            }
             break;
-        case utility::RecordType::Imu: {
-            utility::IMURecordSchema recordSchema = metadata;
-            IMUData imuData = recordSchema.getMessage();
-            return std::dynamic_pointer_cast(std::make_shared(imuData));
         }
+        case DatatypeEnum::IMUData: {
+            auto msg = bytePlayer.next();
+            if(msg.has_value()) {
+                return std::make_shared(msg.value());
+            }
+            break;
+        }
+        case DatatypeEnum::EncodedFrame: {
+            auto msg = bytePlayer.next();
+            if(msg.has_value()) {
+                return std::make_shared(msg.value());
+            }
+            break;
+        }
+        case DatatypeEnum::PointCloudData: {
+            auto msg = bytePlayer.next();
+            if(msg.has_value()) {
+                return std::make_shared(msg.value());
+            }
+            break;
+        }
+        case DatatypeEnum::ADatatype:
+        case DatatypeEnum::Buffer:
+        case DatatypeEnum::NNData:
+        case DatatypeEnum::ImageManipConfig:
+        case DatatypeEnum::ImageManipConfigV2:
+        case DatatypeEnum::CameraControl:
+        case DatatypeEnum::ImgDetections:
+        case DatatypeEnum::SpatialImgDetections:
+        case DatatypeEnum::SystemInformation:
+        case DatatypeEnum::SystemInformationS3:
+        case DatatypeEnum::SpatialLocationCalculatorConfig:
+        case DatatypeEnum::SpatialLocationCalculatorData:
+        case DatatypeEnum::EdgeDetectorConfig:
+        case DatatypeEnum::AprilTagConfig:
+        case DatatypeEnum::AprilTags:
+        case DatatypeEnum::Tracklets:
+        case DatatypeEnum::StereoDepthConfig:
+        case DatatypeEnum::FeatureTrackerConfig:
+        case DatatypeEnum::ThermalConfig:
+        case DatatypeEnum::ToFConfig:
+        case DatatypeEnum::TrackedFeatures:
+        case DatatypeEnum::BenchmarkReport:
+        case DatatypeEnum::MessageGroup:
+        case DatatypeEnum::TransformData:
+        case DatatypeEnum::PointCloudConfig:
+        case DatatypeEnum::ImageAlignConfig:
+        case DatatypeEnum::ImgAnnotations:
+            throw std::runtime_error("Cannot replay message type: " + std::to_string((int)datatype));
     }
     return {};
 }
+#endif
 
 void ReplayVideo::run() {
+#ifdef DEPTHAI_ENABLE_PROTOBUF
     if(replayVideo.empty() && replayFile.empty()) {
         throw std::runtime_error("ReplayVideo node requires replayVideo or replayFile to be set");
     }
     utility::VideoPlayer videoPlayer;
     utility::BytePlayer bytePlayer;
+    DatatypeEnum datatype = DatatypeEnum::ImgFrame;
     bool hasVideo = !replayVideo.empty();
     bool hasMetadata = !replayFile.empty();
     if(!replayVideo.empty()) try {
@@ -62,15 +178,12 @@
             if(logger) logger->warn("Video not replaying: {}", e.what());
         }
     if(!replayFile.empty()) try {
-            bytePlayer.init(replayFile.string());
+            auto schemaName = bytePlayer.init(replayFile.string());
+            datatype = utility::schemaNameToDatatype(schemaName);
         } catch(const std::exception& e) {
             hasMetadata = false;
             if(logger) logger->warn("Metadata not replaying: {}", e.what());
         }
-    utility::RecordType type = utility::RecordType::Other;
-    if(hasVideo && !hasMetadata) {
-        type = utility::RecordType::Video;
-    }
     if(!hasVideo) {
         throw std::runtime_error("Video file not found or could not be opened");
     }
@@ -80,23 +193,20 @@
     auto loopStart = std::chrono::steady_clock::now();
     auto prevMsgTs = loopStart;
     while(isRunning()) {
-        nlohmann::json metadata;
+        std::shared_ptr metadata;
         std::vector frame;
         if(hasMetadata) {
-            auto msg = bytePlayer.next();
-            if(msg.has_value()) {
-                metadata = msg.value();
-                if(first) {
-                    type = metadata["type"].get();
-                    if(type != utility::RecordType::Video) {
-                        throw std::runtime_error("Invalid message type, expected a video stream");
-                    }
-                }
+            if(datatype != DatatypeEnum::ImgFrame) {
+                throw std::runtime_error("Invalid message type, expected ImgFrame");
+            }
+            auto msg = getProtoMessage(bytePlayer, datatype);
+            if(msg != nullptr) {
+                metadata = std::dynamic_pointer_cast(msg);
             } else if(!first) {
                 // End of file
                 if(loop) {
                     bytePlayer.restart();
-                    if(hasVideo && type == utility::RecordType::Video) {
+                    if(hasVideo) {
                         videoPlayer.restart();
                     }
                     continue;
@@ -106,7 +216,7 @@
             hasMetadata = false;
         }
-        if(hasVideo && type == utility::RecordType::Video) {
+        if(hasVideo) {
             auto msg = videoPlayer.next();
             if(msg.has_value()) {
                 frame = msg.value();
@@ -129,28 +239,29 @@
             break;
         }
 
-        if(!hasVideo && type == utility::RecordType::Video) {
+        if(!hasVideo) {
             throw std::runtime_error("Video file not found");
         }
-        if(!hasMetadata && type == utility::RecordType::Video) {
-            utility::VideoRecordSchema recordSchema;
+        if(!hasMetadata) {
+            ImgFrame frame;
             auto time = std::chrono::steady_clock::now() - start;
             const auto& [width, height] = videoPlayer.size();
-            recordSchema.type = utility::RecordType::Video;
-            recordSchema.timestamp.set(time);
-            recordSchema.sequenceNumber = index++;
-            recordSchema.width = width;
-            recordSchema.height = height;
-            metadata = recordSchema;
-        } else if(type == utility::RecordType::Video && size.has_value()) {
-            utility::VideoRecordSchema recordSchema = metadata;
-            recordSchema.width = std::get<0>(size.value());
-            recordSchema.height = std::get<1>(size.value());
-            metadata = recordSchema;
+            frame.setWidth(width);
+            frame.setHeight(height);
+            frame.setType(outFrameType);
+            frame.setTimestampDevice(std::chrono::time_point(time));
+            frame.setSequenceNum(index++);
+            frame.sourceFb = frame.fb;
+            auto protoMsg = utility::getProtoMessage(&frame, true);
+            std::shared_ptr sharedProtoMsg = std::move(protoMsg);
+            metadata = std::dynamic_pointer_cast(sharedProtoMsg);
+        } else if(size.has_value()) {
+            metadata->mutable_fb()->set_width(std::get<0>(size.value()));
+            metadata->mutable_fb()->set_height(std::get<1>(size.value()));
         }
 
-        auto buffer = getVideoMessage(metadata, outFrameType, frame);
+        auto buffer = getVideoMessage(*metadata, outFrameType, frame);
 
         if(first) prevMsgTs = buffer->getTimestampDevice();
@@ -173,16 +284,22 @@
     }
     logger->info("Replay finished - stopping the pipeline!");
     stopPipeline();
+#else
+    throw std::runtime_error("ReplayVideo node requires protobuf support");
+#endif
 }
 
 void ReplayMetadataOnly::run() {
+#ifdef DEPTHAI_ENABLE_PROTOBUF
     if(replayFile.empty()) {
        throw std::runtime_error("ReplayMetadataOnly node requires replayFile to be set");
     }
     utility::BytePlayer bytePlayer;
+    DatatypeEnum datatype = DatatypeEnum::Buffer;
     bool hasMetadata = !replayFile.empty();
     if(!replayFile.empty()) try {
-            bytePlayer.init(replayFile.string());
+            auto schemaName = bytePlayer.init(replayFile.string());
+            datatype = utility::schemaNameToDatatype(schemaName);
         } catch(const std::exception& e) {
             hasMetadata = false;
             if(logger) logger->warn("Metadata not replaying: {}", e.what());
@@ -190,19 +307,18 @@
     if(!hasMetadata) {
         throw std::runtime_error("Metadata file not found");
     }
-    utility::RecordType type = utility::RecordType::Other;
     bool first = true;
     auto loopStart = std::chrono::steady_clock::now();
     auto prevMsgTs = loopStart;
     while(isRunning()) {
-        nlohmann::json metadata;
+        std::shared_ptr metadata;
         std::vector frame;
-        auto msg = bytePlayer.next();
-        if(msg.has_value()) {
-            metadata = msg.value();
-            if(first) {
-                type = metadata["type"].get();
-            }
+        if(!utility::deserializationSupported(datatype)) {
+            throw std::runtime_error("Invalid message type. Cannot replay");
+        }
+        auto msg = getProtoMessage(bytePlayer, datatype);
+        if(msg != nullptr) {
+            metadata = msg;
         } else if(!first) {
             // End of file
             if(loop) {
@@ -213,7 +329,7 @@
         } else {
             throw std::runtime_error("Metadata file contains no messages");
         }
-        auto buffer = getMessage(metadata, type);
+        auto buffer = getMessage(metadata, datatype);
 
         if(first) prevMsgTs = buffer->getTimestampDevice();
@@ -233,6 +349,9 @@
         first = false;
     }
     stopPipeline();
+#else
+    throw std::runtime_error("ReplayMetadataOnly node requires protobuf support");
+#endif
 }
 
 std::filesystem::path ReplayVideo::getReplayMetadataFile() const {
diff --git a/src/utility/HolisticRecordReplay.hpp b/src/utility/HolisticRecordReplay.hpp
index 81fac9a64..12ed59cd9 100644
--- a/src/utility/HolisticRecordReplay.hpp
+++ b/src/utility/HolisticRecordReplay.hpp
@@ -14,12 +14,14 @@ namespace utility {
 bool setupHolisticRecord(Pipeline& pipeline,
                          const std::string& deviceId,
                          RecordConfig& recordConfig,
-                         std::unordered_map& outFilenames);
+                         std::unordered_map& outFilenames,
+                         bool legacy = false);
 
 bool setupHolisticReplay(Pipeline& pipeline,
                          std::string replayPath,
                          const std::string& deviceId,
                          RecordConfig& recordConfig,
-                         std::unordered_map& outFilenames);
+                         std::unordered_map& outFilenames,
+                         bool legacy = false);
 
 #endif
 }  // namespace utility
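The legacy parameter defaults to false, so existing callers are unaffected; only RVC2-class devices opt in, mirroring the PipelineImpl::build() call sites earlier in this diff. A self-contained restatement of the predicate, with the XLink names stubbed out for illustration:

enum class Platform { MYRIAD_2, MYRIAD_X, RVC4 };

// True for RVC2-class devices, which take the legacy path (32/8-aligned
// record sizes, YUV420p replay); RVC4 uses the new protobuf-based route.
bool useLegacyRecordReplay(Platform p) {
    return p == Platform::MYRIAD_2 || p == Platform::MYRIAD_X;
}

int main() {
    return useLegacyRecordReplay(Platform::RVC4) ? 1 : 0;
}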
diff --git a/src/utility/ProtoSerialize.cpp b/src/utility/ProtoSerialize.cpp
index 43a11090e..f261f1afd 100644
--- a/src/utility/ProtoSerialize.cpp
+++ b/src/utility/ProtoSerialize.cpp
@@ -6,6 +6,8 @@
 
 #include
 
+#include "depthai/schemas/PointCloudData.pb.h"
+
 namespace dai {
 namespace utility {
 //     // Writes the FileDescriptor of this descriptor and all transitive dependencies
@@ -82,6 +84,628 @@ void serializeImgTransformation(proto::common::ImgTransformation* imgTransformat
         distortionCoefficients->add_values(value);
     }
 }
+ImgTransformation deserializeImgTransformation(const proto::common::ImgTransformation& imgTransformation) {
+    std::array, 3> transformationMatrix;
+    std::array, 3> sourceIntrinsicMatrix;
+    std::vector distortionCoefficients;
+    distortionCoefficients.reserve(imgTransformation.distortioncoefficients().values_size());
+    for(auto i = 0U; i < 3; ++i)
+        for(auto j = 0U; j < 3; ++j) transformationMatrix[i][j] = imgTransformation.transformationmatrix().arrays(i).values(j);
+    for(auto i = 0U; i < 3; ++i)
+        for(auto j = 0U; j < 3; ++j) sourceIntrinsicMatrix[i][j] = imgTransformation.sourceintrinsicmatrix().arrays(i).values(j);
+    for(auto i = 0; i < imgTransformation.distortioncoefficients().values_size(); ++i)
+        distortionCoefficients.push_back(imgTransformation.distortioncoefficients().values(i));
+    ImgTransformation transformation;
+    transformation = ImgTransformation(imgTransformation.srcwidth(),
+                                       imgTransformation.srcheight(),
+                                       sourceIntrinsicMatrix,
+                                       static_cast(imgTransformation.distortionmodel()),
+                                       distortionCoefficients);
+    if(transformation.isValid()) {
+        transformation.addTransformation(transformationMatrix);
+        transformation.addCrop(0, 0, imgTransformation.width(), imgTransformation.height());
+    }
+    return transformation;
+}
+
+DatatypeEnum schemaNameToDatatype(const std::string& schemaName) {
+    if(schemaName == proto::encoded_frame::EncodedFrame::descriptor()->full_name()) {
+        return DatatypeEnum::EncodedFrame;
+    } else if(schemaName == proto::imu_data::IMUData::descriptor()->full_name()) {
+        return DatatypeEnum::IMUData;
+    } else if(schemaName == proto::image_annotations::ImageAnnotations::descriptor()->full_name()) {
+        return DatatypeEnum::ImgAnnotations;
+    } else if(schemaName == proto::img_detections::ImgDetections::descriptor()->full_name()) {
+        return DatatypeEnum::ImgDetections;
+    } else if(schemaName == proto::img_frame::ImgFrame::descriptor()->full_name()) {
+        return DatatypeEnum::ImgFrame;
+    } else if(schemaName == proto::point_cloud_data::PointCloudData::descriptor()->full_name()) {
+        return DatatypeEnum::PointCloudData;
+    } else if(schemaName == proto::spatial_img_detections::SpatialImgDetections::descriptor()->full_name()) {
+        return DatatypeEnum::SpatialImgDetections;
+    } else {
+        throw std::runtime_error("Unknown schema name: " + schemaName);
+    }
+}
+
+bool deserializationSupported(DatatypeEnum datatype) {
+    switch(datatype) {
+        case DatatypeEnum::ImgFrame:
+        case DatatypeEnum::EncodedFrame:
+        case DatatypeEnum::IMUData:
+        case DatatypeEnum::PointCloudData:
+            return true;
+        case DatatypeEnum::ADatatype:
+        case DatatypeEnum::Buffer:
+        case DatatypeEnum::NNData:
+        case DatatypeEnum::ImageManipConfig:
+        case DatatypeEnum::ImageManipConfigV2:
+        case DatatypeEnum::CameraControl:
+        case DatatypeEnum::ImgDetections:
+        case DatatypeEnum::SpatialImgDetections:
+        case DatatypeEnum::SystemInformation:
+        case DatatypeEnum::SystemInformationS3:
+        case DatatypeEnum::SpatialLocationCalculatorConfig:
+        case DatatypeEnum::SpatialLocationCalculatorData:
+        case DatatypeEnum::EdgeDetectorConfig:
+        case DatatypeEnum::AprilTagConfig:
+        case DatatypeEnum::AprilTags:
+        case DatatypeEnum::Tracklets:
+        case DatatypeEnum::StereoDepthConfig:
+        case DatatypeEnum::FeatureTrackerConfig:
+        case DatatypeEnum::ThermalConfig:
+        case DatatypeEnum::ToFConfig:
+        case DatatypeEnum::TrackedFeatures:
+        case DatatypeEnum::BenchmarkReport:
+        case DatatypeEnum::MessageGroup:
+        case DatatypeEnum::TransformData:
+        case DatatypeEnum::PointCloudConfig:
+        case DatatypeEnum::ImageAlignConfig:
+        case DatatypeEnum::ImgAnnotations:
+            return false;
+    }
+    return false;
+}
+
+template <>
+std::unique_ptr getProtoMessage(const ImgAnnotations* message, bool) {
+    auto imageAnnotations = std::make_unique();
+
+    imageAnnotations->set_sequencenum(message->sequenceNum);
+    proto::common::Timestamp* ts = imageAnnotations->mutable_ts();
+    ts->set_sec(message->ts.sec);
+    ts->set_nsec(message->ts.nsec);
+    proto::common::Timestamp* tsDevice = imageAnnotations->mutable_tsdevice();
+    tsDevice->set_sec(message->tsDevice.sec);
+    tsDevice->set_nsec(message->tsDevice.nsec);
+
+    for(const auto& annotation : message->annotations) {
+        proto::image_annotations::ImageAnnotation* imageAnnotation = imageAnnotations->add_annotations();
+        for(const auto& circle : annotation.circles) {
+            proto::image_annotations::CircleAnnotation* circleAnnotation = imageAnnotation->add_circles();
+            circleAnnotation->mutable_position()->set_x(circle.position.x);
+            circleAnnotation->mutable_position()->set_y(circle.position.y);
+            circleAnnotation->set_diameter(circle.diameter);
+            circleAnnotation->set_thickness(circle.thickness);
+            circleAnnotation->mutable_fillcolor()->set_r(circle.fillColor.r);
+            circleAnnotation->mutable_fillcolor()->set_g(circle.fillColor.g);
+template <>
+std::unique_ptr<google::protobuf::Message> getProtoMessage(const ImgAnnotations* message, bool) {
+    auto imageAnnotations = std::make_unique<proto::image_annotations::ImageAnnotations>();
+
+    imageAnnotations->set_sequencenum(message->sequenceNum);
+    proto::common::Timestamp* ts = imageAnnotations->mutable_ts();
+    ts->set_sec(message->ts.sec);
+    ts->set_nsec(message->ts.nsec);
+    proto::common::Timestamp* tsDevice = imageAnnotations->mutable_tsdevice();
+    tsDevice->set_sec(message->tsDevice.sec);
+    tsDevice->set_nsec(message->tsDevice.nsec);
+
+    for(const auto& annotation : message->annotations) {
+        proto::image_annotations::ImageAnnotation* imageAnnotation = imageAnnotations->add_annotations();
+        for(const auto& circle : annotation.circles) {
+            proto::image_annotations::CircleAnnotation* circleAnnotation = imageAnnotation->add_circles();
+            circleAnnotation->mutable_position()->set_x(circle.position.x);
+            circleAnnotation->mutable_position()->set_y(circle.position.y);
+            circleAnnotation->set_diameter(circle.diameter);
+            circleAnnotation->set_thickness(circle.thickness);
+            circleAnnotation->mutable_fillcolor()->set_r(circle.fillColor.r);
+            circleAnnotation->mutable_fillcolor()->set_g(circle.fillColor.g);
+            circleAnnotation->mutable_fillcolor()->set_b(circle.fillColor.b);
+            circleAnnotation->mutable_fillcolor()->set_a(circle.fillColor.a);
+            circleAnnotation->mutable_outlinecolor()->set_r(circle.outlineColor.r);
+            circleAnnotation->mutable_outlinecolor()->set_g(circle.outlineColor.g);
+            circleAnnotation->mutable_outlinecolor()->set_b(circle.outlineColor.b);
+            circleAnnotation->mutable_outlinecolor()->set_a(circle.outlineColor.a);
+        }
+        for(const auto& points : annotation.points) {
+            proto::image_annotations::PointsAnnotation* pointsAnnotation = imageAnnotation->add_points();
+            PointsAnnotationType type = points.type;
+            pointsAnnotation->set_type(static_cast<proto::image_annotations::PointsAnnotationType>(type));
+            for(const auto& point : points.points) {
+                proto::common::Point2f* protoPoint = pointsAnnotation->add_points();
+                protoPoint->set_x(point.x);
+                protoPoint->set_y(point.y);
+            }
+            pointsAnnotation->mutable_outlinecolor()->set_r(points.outlineColor.r);
+            pointsAnnotation->mutable_outlinecolor()->set_g(points.outlineColor.g);
+            pointsAnnotation->mutable_outlinecolor()->set_b(points.outlineColor.b);
+            pointsAnnotation->mutable_outlinecolor()->set_a(points.outlineColor.a);
+            for(const auto& color : points.outlineColors) {
+                proto::common::Color* protoColor = pointsAnnotation->add_outlinecolors();
+                protoColor->set_r(color.r);
+                protoColor->set_g(color.g);
+                protoColor->set_b(color.b);
+                protoColor->set_a(color.a);
+            }
+            pointsAnnotation->mutable_fillcolor()->set_r(points.fillColor.r);
+            pointsAnnotation->mutable_fillcolor()->set_g(points.fillColor.g);
+            pointsAnnotation->mutable_fillcolor()->set_b(points.fillColor.b);
+            pointsAnnotation->mutable_fillcolor()->set_a(points.fillColor.a);
+            pointsAnnotation->set_thickness(points.thickness);
+        }
+        for(const auto& text : annotation.texts) {
+            proto::image_annotations::TextAnnotation* textAnnotation = imageAnnotation->add_texts();
+            textAnnotation->mutable_position()->set_x(text.position.x);
+            textAnnotation->mutable_position()->set_y(text.position.y);
+            textAnnotation->set_text(text.text);
+            textAnnotation->set_fontsize(text.fontSize);
+            textAnnotation->mutable_textcolor()->set_r(text.textColor.r);
+            textAnnotation->mutable_textcolor()->set_g(text.textColor.g);
+            textAnnotation->mutable_textcolor()->set_b(text.textColor.b);
+            textAnnotation->mutable_textcolor()->set_a(text.textColor.a);
+            textAnnotation->mutable_backgroundcolor()->set_r(text.backgroundColor.r);
+            textAnnotation->mutable_backgroundcolor()->set_g(text.backgroundColor.g);
+            textAnnotation->mutable_backgroundcolor()->set_b(text.backgroundColor.b);
+            textAnnotation->mutable_backgroundcolor()->set_a(text.backgroundColor.a);
+        }
+    }
+    return imageAnnotations;
+}
+template <>
+std::unique_ptr<google::protobuf::Message> getProtoMessage(const SpatialImgDetections* message, bool) {
+    // create and populate SpatialImgDetections protobuf message
+    auto spatialImgDetections = std::make_unique<proto::spatial_img_detections::SpatialImgDetections>();
+    spatialImgDetections->set_sequencenum(message->sequenceNum);
+
+    proto::common::Timestamp* ts = spatialImgDetections->mutable_ts();
+    ts->set_sec(message->ts.sec);
+    ts->set_nsec(message->ts.nsec);
+
+    proto::common::Timestamp* tsDevice = spatialImgDetections->mutable_tsdevice();
+    tsDevice->set_sec(message->tsDevice.sec);
+    tsDevice->set_nsec(message->tsDevice.nsec);
+
+    for(const auto& detection : message->detections) {
+        proto::spatial_img_detections::SpatialImgDetection* spatialImgDetection = spatialImgDetections->add_detections();
+
+        // populate SpatialImgDetection.ImgDetection from struct inheritance
+        proto::img_detections::ImgDetection* imgDetection = spatialImgDetection->mutable_detection();
+        imgDetection->set_label(detection.label);
+        imgDetection->set_confidence(detection.confidence);
+        imgDetection->set_xmin(detection.xmin);
+        imgDetection->set_ymin(detection.ymin);
+        imgDetection->set_xmax(detection.xmax);
+        imgDetection->set_ymax(detection.ymax);
+
+        // populate SpatialImgDetection.Point3f
+        proto::spatial_img_detections::Point3f* spatialCoordinates = spatialImgDetection->mutable_spatialcoordinates();
+        spatialCoordinates->set_x(detection.spatialCoordinates.x);
+        spatialCoordinates->set_y(detection.spatialCoordinates.y);
+        spatialCoordinates->set_z(detection.spatialCoordinates.z);
+
+        // populate SpatialImgDetection.SpatialLocationCalculatorConfigData
+        proto::spatial_img_detections::SpatialLocationCalculatorConfigData* boundingBoxMapping = spatialImgDetection->mutable_boundingboxmapping();
+
+        // populate SpatialImgDetection.SpatialLocationCalculatorConfigData.Rect
+        proto::spatial_img_detections::Rect* roi = boundingBoxMapping->mutable_roi();
+        roi->set_x(detection.boundingBoxMapping.roi.x);
+        roi->set_y(detection.boundingBoxMapping.roi.y);
+        roi->set_width(detection.boundingBoxMapping.roi.width);
+        roi->set_height(detection.boundingBoxMapping.roi.height);
+
+        // populate SpatialImgDetection.SpatialLocationCalculatorConfigData.SpatialLocationCalculatorConfigThresholds
+        proto::spatial_img_detections::SpatialLocationCalculatorConfigThresholds* depthTresholds = boundingBoxMapping->mutable_depththresholds();
+        depthTresholds->set_lowerthreshold(detection.boundingBoxMapping.depthThresholds.lowerThreshold);
+        depthTresholds->set_upperthreshold(detection.boundingBoxMapping.depthThresholds.upperThreshold);
+
+        // populate SpatialImgDetection.SpatialLocationCalculatorConfigData.SpatialLocationCalculatorAlgorithm
+        boundingBoxMapping->set_calculationalgorithm(
+            static_cast<proto::spatial_img_detections::SpatialLocationCalculatorAlgorithm>(detection.boundingBoxMapping.calculationAlgorithm));
+
+        // populate SpatialImgDetection.SpatialLocationCalculatorConfigData.stepSize
+        boundingBoxMapping->set_stepsize(detection.boundingBoxMapping.stepSize);
+    }
+    proto::common::ImgTransformation* imgTransformation = spatialImgDetections->mutable_transformation();
+    if(message->transformation.has_value()) {
+        utility::serializeImgTransformation(imgTransformation, message->transformation.value());
+    }
+
+    return spatialImgDetections;
+}
+template <>
+std::unique_ptr<google::protobuf::Message> getProtoMessage(const IMUData* message, bool) {
+    // create and populate IMUData protobuf message
+    auto imuData = std::make_unique<proto::imu_data::IMUData>();
+    auto imuPackets = imuData->mutable_packets();
+    imuPackets->Reserve(message->packets.size());
+    for(auto packet : message->packets) {
+        auto imuPacket = imuPackets->Add();
+        auto imuAccelerometer = imuPacket->mutable_accelerometer();
+        imuAccelerometer->mutable_vec()->set_x(packet.acceleroMeter.x);
+        imuAccelerometer->mutable_vec()->set_y(packet.acceleroMeter.y);
+        imuAccelerometer->mutable_vec()->set_z(packet.acceleroMeter.z);
+        imuAccelerometer->mutable_report()->set_accuracy(static_cast<proto::imu_data::IMUReport::Accuracy>(packet.acceleroMeter.accuracy));
+        imuAccelerometer->mutable_report()->set_sequence(packet.acceleroMeter.sequence);
+        imuAccelerometer->mutable_report()->mutable_ts()->set_sec(packet.acceleroMeter.timestamp.sec);
+        imuAccelerometer->mutable_report()->mutable_ts()->set_nsec(packet.acceleroMeter.timestamp.nsec);
+        imuAccelerometer->mutable_report()->mutable_tsdevice()->set_sec(packet.acceleroMeter.tsDevice.sec);
+        imuAccelerometer->mutable_report()->mutable_tsdevice()->set_nsec(packet.acceleroMeter.tsDevice.nsec);
+
+        auto imuGyroscope = imuPacket->mutable_gyroscope();
+        imuGyroscope->mutable_vec()->set_x(packet.gyroscope.x);
+        imuGyroscope->mutable_vec()->set_y(packet.gyroscope.y);
+        imuGyroscope->mutable_vec()->set_z(packet.gyroscope.z);
+        imuGyroscope->mutable_report()->set_accuracy(static_cast<proto::imu_data::IMUReport::Accuracy>(packet.gyroscope.accuracy));
+        imuGyroscope->mutable_report()->set_sequence(packet.gyroscope.sequence);
+        imuGyroscope->mutable_report()->mutable_ts()->set_sec(packet.gyroscope.timestamp.sec);
+        imuGyroscope->mutable_report()->mutable_ts()->set_nsec(packet.gyroscope.timestamp.nsec);
+        imuGyroscope->mutable_report()->mutable_tsdevice()->set_sec(packet.gyroscope.tsDevice.sec);
+        imuGyroscope->mutable_report()->mutable_tsdevice()->set_nsec(packet.gyroscope.tsDevice.nsec);
+
+        auto imuMagnetometer = imuPacket->mutable_magnetometer();
+        imuMagnetometer->mutable_vec()->set_x(packet.magneticField.x);
+        imuMagnetometer->mutable_vec()->set_y(packet.magneticField.y);
+        imuMagnetometer->mutable_vec()->set_z(packet.magneticField.z);
+        imuMagnetometer->mutable_report()->set_accuracy(static_cast<proto::imu_data::IMUReport::Accuracy>(packet.magneticField.accuracy));
+        imuMagnetometer->mutable_report()->set_sequence(packet.magneticField.sequence);
+        imuMagnetometer->mutable_report()->mutable_ts()->set_sec(packet.magneticField.timestamp.sec);
+        imuMagnetometer->mutable_report()->mutable_ts()->set_nsec(packet.magneticField.timestamp.nsec);
+        imuMagnetometer->mutable_report()->mutable_tsdevice()->set_sec(packet.magneticField.tsDevice.sec);
+        imuMagnetometer->mutable_report()->mutable_tsdevice()->set_nsec(packet.magneticField.tsDevice.nsec);
+
+        auto imuRotationVector = imuPacket->mutable_rotationvector();
+        imuRotationVector->mutable_quat()->set_x(packet.rotationVector.i);
+        imuRotationVector->mutable_quat()->set_y(packet.rotationVector.j);
+        imuRotationVector->mutable_quat()->set_z(packet.rotationVector.k);
+        imuRotationVector->mutable_quat()->set_w(packet.rotationVector.real);
+        imuRotationVector->mutable_report()->set_accuracy(static_cast<proto::imu_data::IMUReport::Accuracy>(packet.rotationVector.accuracy));
+        imuRotationVector->mutable_report()->set_sequence(packet.rotationVector.sequence);
+        imuRotationVector->mutable_report()->mutable_ts()->set_sec(packet.rotationVector.timestamp.sec);
+        imuRotationVector->mutable_report()->mutable_ts()->set_nsec(packet.rotationVector.timestamp.nsec);
+        imuRotationVector->mutable_report()->mutable_tsdevice()->set_sec(packet.rotationVector.tsDevice.sec);
+        imuRotationVector->mutable_report()->mutable_tsdevice()->set_nsec(packet.rotationVector.tsDevice.nsec);
+    }
+
+    // Set timestamps
+    proto::common::Timestamp* ts = imuData->mutable_ts();
+    ts->set_sec(message->ts.sec);
+    ts->set_nsec(message->ts.nsec);
+    proto::common::Timestamp* tsDevice = imuData->mutable_tsdevice();
+    tsDevice->set_sec(message->tsDevice.sec);
+    tsDevice->set_nsec(message->tsDevice.nsec);
+    imuData->set_sequencenum(message->sequenceNum);
+
+    return imuData;
+}
+template <>
+std::unique_ptr<google::protobuf::Message> getProtoMessage(const ImgDetections* message, bool) {
+    auto imgDetections = std::make_unique<proto::img_detections::ImgDetections>();
+
+    imgDetections->set_sequencenum(message->sequenceNum);
+    proto::common::Timestamp* ts = imgDetections->mutable_ts();
+    ts->set_sec(message->ts.sec);
+    ts->set_nsec(message->ts.nsec);
+    proto::common::Timestamp* tsDevice = imgDetections->mutable_tsdevice();
+    tsDevice->set_sec(message->tsDevice.sec);
+    tsDevice->set_nsec(message->tsDevice.nsec);
+
+    for(const auto& detection : message->detections) {
+        proto::img_detections::ImgDetection* imgDetection = imgDetections->add_detections();
+        imgDetection->set_label(detection.label);
+        imgDetection->set_confidence(detection.confidence);
+        imgDetection->set_xmin(detection.xmin);
+        imgDetection->set_ymin(detection.ymin);
+        imgDetection->set_xmax(detection.xmax);
+        imgDetection->set_ymax(detection.ymax);
+    }
+
+    proto::common::ImgTransformation* imgTransformation = imgDetections->mutable_transformation();
+    if(message->transformation.has_value()) {
+        utility::serializeImgTransformation(imgTransformation, message->transformation.value());
+    }
+    return imgDetections;
+}
+template <>
+std::unique_ptr<google::protobuf::Message> getProtoMessage(const EncodedFrame* message, bool metadataOnly) {
+    // Create a unique pointer to the protobuf EncodedFrame message
+    auto encodedFrame = std::make_unique<proto::encoded_frame::EncodedFrame>();
+
+    // Populate the protobuf message fields with the EncodedFrame data
+    encodedFrame->set_instancenum(message->instanceNum);  // instanceNum -> instancenum
+    encodedFrame->set_width(message->width);
+    encodedFrame->set_height(message->height);
+    encodedFrame->set_quality(message->quality);
+    encodedFrame->set_bitrate(message->bitrate);
+    encodedFrame->set_profile(static_cast<proto::encoded_frame::EncodedFrame::Profile>(message->profile));  // Profile enum
+    encodedFrame->set_lossless(message->lossless);
+    encodedFrame->set_type(static_cast<proto::encoded_frame::EncodedFrame::FrameType>(message->type));  // FrameType enum
+    encodedFrame->set_frameoffset(message->frameOffset);  // frameOffset -> frameoffset
+    encodedFrame->set_framesize(message->frameSize);      // frameSize -> framesize
+    encodedFrame->set_sequencenum(message->sequenceNum);  // sequenceNum -> sequencenum
+
+    // Set timestamps
+    proto::common::Timestamp* ts = encodedFrame->mutable_ts();
+    ts->set_sec(message->ts.sec);
+    ts->set_nsec(message->ts.nsec);
+
+    proto::common::Timestamp* tsDevice = encodedFrame->mutable_tsdevice();
+    tsDevice->set_sec(message->tsDevice.sec);
+    tsDevice->set_nsec(message->tsDevice.nsec);
+
+    // Set camera settings
+    proto::common::CameraSettings* cam = encodedFrame->mutable_cam();
+    cam->set_exposuretimeus(message->cam.exposureTimeUs);  // exposureTimeUs -> exposuretimeus
+    cam->set_sensitivityiso(message->cam.sensitivityIso);  // sensitivityIso -> sensitivityiso
+    cam->set_lensposition(message->cam.lensPosition);      // lensPosition -> lensposition
+    cam->set_wbcolortemp(message->cam.wbColorTemp);        // wbColorTemp -> wbcolortemp
+    cam->set_lenspositionraw(message->cam.lensPositionRaw);  // lensPositionRaw -> lenspositionraw
+
+    if(!metadataOnly) {
+        // Set the encoded message data
+        encodedFrame->set_data(message->data->getData().data(), message->data->getData().size());
+    }
+
+    proto::common::ImgTransformation* imgTransformation = encodedFrame->mutable_transformation();
+    utility::serializeImgTransformation(imgTransformation, message->transformation);
+
+    // Return the populated protobuf message
+    return encodedFrame;
+}
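The metadataOnly flag in these specializations decides whether the payload bytes are copied into the proto message. A short sketch of the intended use; serializeBothWays is an illustrative name:

    #include "depthai/pipeline/datatype/ImgFrame.hpp"
    #include "utility/ProtoSerialize.hpp"  // internal header, src/ include path

    void serializeBothWays(const dai::ImgFrame& frame) {
        // Full message: metadata plus the pixel data blob.
        auto full = dai::utility::getProtoMessage(&frame, /*metadataOnly=*/false);
        // Metadata only: timestamps, frame specs and camera settings, no pixels.
        auto meta = dai::utility::getProtoMessage(&frame, /*metadataOnly=*/true);
        // full->ByteSizeLong() >= meta->ByteSizeLong() for any non-empty frame.
    }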
+template <>
+std::unique_ptr<google::protobuf::Message> getProtoMessage(const ImgFrame* message, bool metadataOnly) {
+    // create and populate ImgFrame protobuf message
+    auto imgFrame = std::make_unique<proto::img_frame::ImgFrame>();
+    proto::common::Timestamp* ts = imgFrame->mutable_ts();
+    ts->set_sec(message->ts.sec);
+    ts->set_nsec(message->ts.nsec);
+    proto::common::Timestamp* tsDevice = imgFrame->mutable_tsdevice();
+    tsDevice->set_sec(message->tsDevice.sec);
+    tsDevice->set_nsec(message->tsDevice.nsec);
+
+    imgFrame->set_sequencenum(message->sequenceNum);
+
+    proto::img_frame::Specs* fb = imgFrame->mutable_fb();
+    fb->set_type(static_cast<proto::img_frame::ImgFrame::Type>(message->fb.type));
+    fb->set_width(message->fb.width);
+    fb->set_height(message->fb.height);
+    fb->set_stride(message->fb.stride);
+    fb->set_bytespp(message->fb.bytesPP);
+    fb->set_p1offset(message->fb.p1Offset);
+    fb->set_p2offset(message->fb.p2Offset);
+    fb->set_p3offset(message->fb.p3Offset);
+
+    proto::img_frame::Specs* sourceFb = imgFrame->mutable_sourcefb();
+    sourceFb->set_type(static_cast<proto::img_frame::ImgFrame::Type>(message->sourceFb.type));
+    sourceFb->set_width(message->sourceFb.width);
+    sourceFb->set_height(message->sourceFb.height);
+    sourceFb->set_stride(message->sourceFb.stride);
+    sourceFb->set_bytespp(message->sourceFb.bytesPP);
+    sourceFb->set_p1offset(message->sourceFb.p1Offset);
+    sourceFb->set_p2offset(message->sourceFb.p2Offset);
+    sourceFb->set_p3offset(message->sourceFb.p3Offset);
+
+    proto::common::CameraSettings* cam = imgFrame->mutable_cam();
+    cam->set_exposuretimeus(message->cam.exposureTimeUs);
+    cam->set_sensitivityiso(message->cam.sensitivityIso);
+    cam->set_lensposition(message->cam.lensPosition);
+    cam->set_wbcolortemp(message->cam.wbColorTemp);
+    cam->set_lenspositionraw(message->cam.lensPositionRaw);
+
+    imgFrame->set_instancenum(message->instanceNum);
+
+    imgFrame->set_category(message->category);
+
+    proto::common::ImgTransformation* imgTransformation = imgFrame->mutable_transformation();
+    utility::serializeImgTransformation(imgTransformation, message->transformation);
+
+    if(!metadataOnly) {
+        imgFrame->set_data(message->data->getData().data(), message->data->getData().size());
+    }
+
+    return imgFrame;
+}
+template <>
+std::unique_ptr<google::protobuf::Message> getProtoMessage(const PointCloudData* message, bool metadataOnly) {
+    auto pointCloudData = std::make_unique<proto::point_cloud_data::PointCloudData>();
+
+    auto timestamp = pointCloudData->mutable_ts();
+    timestamp->set_sec(message->ts.sec);
+    timestamp->set_nsec(message->ts.nsec);
+
+    auto timestampDevice = pointCloudData->mutable_tsdevice();
+    timestampDevice->set_sec(message->tsDevice.sec);
+    timestampDevice->set_nsec(message->tsDevice.nsec);
+
+    pointCloudData->set_sequencenum(message->sequenceNum);
+    pointCloudData->set_width(message->getWidth());
+    pointCloudData->set_height(message->getHeight());
+    pointCloudData->set_instancenum(message->getInstanceNum());
+    pointCloudData->set_minx(message->getMinX());
+    pointCloudData->set_miny(message->getMinY());
+    pointCloudData->set_minz(message->getMinZ());
+    pointCloudData->set_maxx(message->getMaxX());
+    pointCloudData->set_maxy(message->getMaxY());
+    pointCloudData->set_maxz(message->getMaxZ());
+    pointCloudData->set_sparse(message->isSparse());
+    pointCloudData->set_color(message->isColor());
+
+    if(!metadataOnly) {
+        pointCloudData->set_data(message->data->getData().data(), message->data->getSize());
+    }
+
+    return pointCloudData;
+}
+
+// template <>
+// void setProtoMessage(ImgAnnotations* obj, std::shared_ptr<google::protobuf::Message> msg, bool) {
+// }
+// template <>
+// void setProtoMessage(SpatialImgDetections* obj, std::shared_ptr<google::protobuf::Message> msg, bool) {
+// }
+// template <>
+// void setProtoMessage(ImgDetections* obj, std::shared_ptr<google::protobuf::Message> msg, bool) {
+// }
+template <>
+void setProtoMessage(IMUData& obj, const google::protobuf::Message* msg, bool) {
+    auto imuData = dynamic_cast<const proto::imu_data::IMUData*>(msg);
+    obj.packets.clear();
+    obj.packets.reserve(imuData->packets().size());
+    for(auto packet : imuData->packets()) {
+        IMUPacket imuPacket;
+        auto protoAccelerometer = packet.accelerometer();
+        auto& daiAccelerometer = imuPacket.acceleroMeter;
+        daiAccelerometer.x = protoAccelerometer.vec().x();
+        daiAccelerometer.y = protoAccelerometer.vec().y();
+        daiAccelerometer.z = protoAccelerometer.vec().z();
+        daiAccelerometer.accuracy = static_cast<IMUReport::Accuracy>(protoAccelerometer.report().accuracy());
+        daiAccelerometer.sequence = protoAccelerometer.report().sequence();
+        daiAccelerometer.timestamp.sec = protoAccelerometer.report().ts().sec();
+        daiAccelerometer.timestamp.nsec = protoAccelerometer.report().ts().nsec();
+        daiAccelerometer.tsDevice.sec = protoAccelerometer.report().tsdevice().sec();
+        daiAccelerometer.tsDevice.nsec = protoAccelerometer.report().tsdevice().nsec();
+
+        auto protoGyroscope = packet.gyroscope();
+        auto& daiGyroscope = imuPacket.gyroscope;
+        daiGyroscope.x = protoGyroscope.vec().x();
+        daiGyroscope.y = protoGyroscope.vec().y();
+        daiGyroscope.z = protoGyroscope.vec().z();
+        daiGyroscope.accuracy = static_cast<IMUReport::Accuracy>(protoGyroscope.report().accuracy());
+        daiGyroscope.sequence = protoGyroscope.report().sequence();
+        daiGyroscope.timestamp.sec = protoGyroscope.report().ts().sec();
+        daiGyroscope.timestamp.nsec = protoGyroscope.report().ts().nsec();
+        daiGyroscope.tsDevice.sec = protoGyroscope.report().tsdevice().sec();
+        daiGyroscope.tsDevice.nsec = protoGyroscope.report().tsdevice().nsec();
+
+        auto protoMagnetometer = packet.magnetometer();
+        auto& daiMagnetometer = imuPacket.magneticField;
+        daiMagnetometer.x = protoMagnetometer.vec().x();
+        daiMagnetometer.y = protoMagnetometer.vec().y();
+        daiMagnetometer.z = protoMagnetometer.vec().z();
+        daiMagnetometer.accuracy = static_cast<IMUReport::Accuracy>(protoMagnetometer.report().accuracy());
+        daiMagnetometer.sequence = protoMagnetometer.report().sequence();
+        daiMagnetometer.timestamp.sec = protoMagnetometer.report().ts().sec();
+        daiMagnetometer.timestamp.nsec = protoMagnetometer.report().ts().nsec();
+        daiMagnetometer.tsDevice.sec = protoMagnetometer.report().tsdevice().sec();
+        daiMagnetometer.tsDevice.nsec = protoMagnetometer.report().tsdevice().nsec();
+
+        auto protoRotationVector = packet.rotationvector();
+        auto& daiRotationVector = imuPacket.rotationVector;
+        daiRotationVector.i = protoRotationVector.quat().x();
+        daiRotationVector.j = protoRotationVector.quat().y();
+        daiRotationVector.k = protoRotationVector.quat().z();
+        daiRotationVector.real = protoRotationVector.quat().w();
+        daiRotationVector.accuracy = static_cast<IMUReport::Accuracy>(protoRotationVector.report().accuracy());
+        daiRotationVector.sequence = protoRotationVector.report().sequence();
+        daiRotationVector.timestamp.sec = protoRotationVector.report().ts().sec();
+        daiRotationVector.timestamp.nsec = protoRotationVector.report().ts().nsec();
+        daiRotationVector.tsDevice.sec = protoRotationVector.report().tsdevice().sec();
+        daiRotationVector.tsDevice.nsec = protoRotationVector.report().tsdevice().nsec();
+
+        obj.packets.push_back(imuPacket);
+    }
+
+    obj.setTimestamp(fromProtoTimestamp(imuData->ts()));
+    obj.setTimestampDevice(fromProtoTimestamp(imuData->tsdevice()));
+    obj.setSequenceNum(imuData->sequencenum());
+}
+template <>
+void setProtoMessage(ImgFrame& obj, const google::protobuf::Message* msg, bool metadataOnly) {
+    auto imgFrame = dynamic_cast<const proto::img_frame::ImgFrame*>(msg);
+    // populate the dai ImgFrame from the protobuf message
+    obj.setTimestamp(utility::fromProtoTimestamp(imgFrame->ts()));
+    obj.setTimestampDevice(utility::fromProtoTimestamp(imgFrame->tsdevice()));
+
+    obj.setSequenceNum(imgFrame->sequencenum());
+
+    obj.fb.type = static_cast<ImgFrame::Type>(imgFrame->fb().type());
+    obj.fb.width = imgFrame->fb().width();
+    obj.fb.height = imgFrame->fb().height();
+    obj.fb.stride = imgFrame->fb().stride();
+    obj.fb.bytesPP = imgFrame->fb().bytespp();
+    obj.fb.p1Offset = imgFrame->fb().p1offset();
+    obj.fb.p2Offset = imgFrame->fb().p2offset();
+    obj.fb.p3Offset = imgFrame->fb().p3offset();
+
+    obj.sourceFb.type = static_cast<ImgFrame::Type>(imgFrame->sourcefb().type());
+    obj.sourceFb.width = imgFrame->sourcefb().width();
+    obj.sourceFb.height = imgFrame->sourcefb().height();
+    obj.sourceFb.stride = imgFrame->sourcefb().stride();
+    obj.sourceFb.bytesPP = imgFrame->sourcefb().bytespp();
+    obj.sourceFb.p1Offset = imgFrame->sourcefb().p1offset();
+    obj.sourceFb.p2Offset = imgFrame->sourcefb().p2offset();
+    obj.sourceFb.p3Offset = imgFrame->sourcefb().p3offset();
+
+    obj.cam.exposureTimeUs = imgFrame->cam().exposuretimeus();
+    obj.cam.sensitivityIso = imgFrame->cam().sensitivityiso();
+    obj.cam.lensPosition = imgFrame->cam().lensposition();
+    obj.cam.wbColorTemp = imgFrame->cam().wbcolortemp();
+    obj.cam.lensPositionRaw = imgFrame->cam().lenspositionraw();
+
+    obj.instanceNum = imgFrame->instancenum();
+
+    obj.category = imgFrame->category();
+
+    obj.transformation = deserializeImgTransformation(imgFrame->transformation());
+
+    if(!metadataOnly) {
+        std::vector<uint8_t> data(imgFrame->data().begin(), imgFrame->data().end());
+        obj.setData(data);
+    }
+}
+template <>
+void setProtoMessage(EncodedFrame& obj, const google::protobuf::Message* msg, bool metadataOnly) {
+    auto encFrame = dynamic_cast<const proto::encoded_frame::EncodedFrame*>(msg);
+    // populate the dai EncodedFrame from the protobuf message
+    obj.setTimestamp(utility::fromProtoTimestamp(encFrame->ts()));
+    obj.setTimestampDevice(utility::fromProtoTimestamp(encFrame->tsdevice()));
+
+    obj.setSequenceNum(encFrame->sequencenum());
+
+    obj.width = encFrame->width();
+    obj.height = encFrame->height();
+
+    obj.instanceNum = encFrame->instancenum();
+
+    obj.quality = encFrame->quality();
+    obj.bitrate = encFrame->bitrate();
+    obj.profile = static_cast<EncodedFrame::Profile>(encFrame->profile());
+
+    obj.lossless = encFrame->lossless();
+    obj.type = static_cast<EncodedFrame::FrameType>(encFrame->type());
+
+    obj.frameOffset = encFrame->frameoffset();
+    obj.frameSize = encFrame->framesize();
+
+    obj.cam.exposureTimeUs = encFrame->cam().exposuretimeus();
+    obj.cam.sensitivityIso = encFrame->cam().sensitivityiso();
+    obj.cam.lensPosition = encFrame->cam().lensposition();
+    obj.cam.wbColorTemp = encFrame->cam().wbcolortemp();
+    obj.cam.lensPositionRaw = encFrame->cam().lenspositionraw();
+
+    obj.transformation = deserializeImgTransformation(encFrame->transformation());
+
+    if(!metadataOnly) {
+        std::vector<uint8_t> data(encFrame->data().begin(), encFrame->data().end());
+        obj.setData(data);
+    }
+}
+template <>
+void setProtoMessage(PointCloudData& obj, const google::protobuf::Message* msg, bool metadataOnly) {
+    auto pcl = dynamic_cast<const proto::point_cloud_data::PointCloudData*>(msg);
+    // populate the dai PointCloudData from the protobuf message
+    obj.setTimestamp(utility::fromProtoTimestamp(pcl->ts()));
+    obj.setTimestampDevice(utility::fromProtoTimestamp(pcl->tsdevice()));
+
+    obj.setSequenceNum(pcl->sequencenum());
+
+    obj.setWidth(pcl->width());
+    obj.setHeight(pcl->height());
+
+    obj.setInstanceNum(pcl->instancenum());
+
+    obj.setMinX(pcl->minx());
+    obj.setMinY(pcl->miny());
+    obj.setMinZ(pcl->minz());
+    obj.setMaxX(pcl->maxx());
+    obj.setMaxY(pcl->maxy());
+    obj.setMaxZ(pcl->maxz());
+    obj.setSparse(pcl->sparse());
+    obj.setColor(pcl->color());
+
+    if(!metadataOnly) {
+        std::vector<uint8_t> data(pcl->data().begin(), pcl->data().end());
+        obj.setData(data);
+    }
+}
 }; // namespace utility
 }; // namespace dai
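The setProtoMessage specializations are the replay-side counterparts of getProtoMessage. A sketch of the full decode path, assuming a raw byte buffer read from a recording; decodeRecordedFrame and the buffer source are hypothetical:

    #include <cstdint>
    #include <memory>
    #include <stdexcept>
    #include <vector>
    #include "depthai/pipeline/datatype/ImgFrame.hpp"
    #include "depthai/schemas/ImgFrame.pb.h"
    #include "utility/ProtoSerialize.hpp"  // internal header, src/ include path

    std::shared_ptr<dai::ImgFrame> decodeRecordedFrame(const std::vector<uint8_t>& bytes) {
        dai::proto::img_frame::ImgFrame proto;
        if(!proto.ParseFromArray(bytes.data(), static_cast<int>(bytes.size()))) {
            throw std::runtime_error("Failed to parse recorded ImgFrame");
        }
        auto frame = std::make_shared<dai::ImgFrame>();
        dai::utility::setProtoMessage(*frame, &proto, /*metadataOnly=*/false);  // copies data too
        return frame;
    }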
+#include "depthai/pipeline/datatypes.hpp" +#include "depthai/schemas/EncodedFrame.pb.h" +#include "depthai/schemas/IMUData.pb.h" +#include "depthai/schemas/ImageAnnotations.pb.h" +#include "depthai/schemas/ImgDetections.pb.h" +#include "depthai/schemas/ImgFrame.pb.h" +#include "depthai/schemas/PointCloudData.pb.h" +#include "depthai/schemas/SpatialImgDetections.pb.h" #include "depthai/schemas/common.pb.h" #include "utility/ProtoSerializable.hpp" + namespace dai { namespace utility { @@ -16,6 +28,55 @@ ProtoSerializable::SchemaPair serializeSchema(std::unique_ptr fromProtoTimestamp(const dai::proto::common::Timestamp& ts) { + using namespace std::chrono; + return time_point(seconds(ts.sec()) + nanoseconds(ts.nsec())); +} + +// Helpers to serialize messages to protobuf +template +std::unique_ptr getProtoMessage(const T*, bool = false) { + throw std::runtime_error("getProtoMessage not implemented for this type"); +} +template <> +std::unique_ptr getProtoMessage(const ImgAnnotations* message, bool); +template <> +std::unique_ptr getProtoMessage(const SpatialImgDetections* message, bool); +template <> +std::unique_ptr getProtoMessage(const IMUData* message, bool); +template <> +std::unique_ptr getProtoMessage(const ImgDetections* message, bool); +template <> +std::unique_ptr getProtoMessage(const EncodedFrame* message, bool metadataOnly); +template <> +std::unique_ptr getProtoMessage(const ImgFrame* message, bool metadataOnly); +template <> +std::unique_ptr getProtoMessage(const PointCloudData* message, bool metadataOnly); + +// Helpers to deserialize messages from protobuf +template +void setProtoMessage(T&, const google::protobuf::Message*, bool = false); +// template <> +// void setProtoMessage(ImgAnnotations& obj, google::protobuf::Message* msg, bool); +// template <> +// void setProtoMessage(SpatialImgDetections& obj, google::protobuf::Message* msg, bool); +// template <> +// void setProtoMessage(ImgDetections& obj, google::protobuf::Message* msg, bool); +template <> +void setProtoMessage(IMUData& obj, const google::protobuf::Message* msg, bool); +template <> +void setProtoMessage(ImgFrame& obj, const google::protobuf::Message* msg, bool metadataOnly); +template <> +void setProtoMessage(EncodedFrame& obj, const google::protobuf::Message* msg, bool metadataOnly); +template <> +void setProtoMessage(PointCloudData& obj, const google::protobuf::Message* msg, bool metadataOnly); }; // namespace utility -}; // namespace dai \ No newline at end of file +}; // namespace dai diff --git a/src/utility/RecordReplay.cpp b/src/utility/RecordReplay.cpp index 86f86faa9..364f97b38 100644 --- a/src/utility/RecordReplay.cpp +++ b/src/utility/RecordReplay.cpp @@ -9,78 +9,77 @@ #include "Environment.hpp" #include "RecordReplayImpl.hpp" #include "build/version.hpp" -#include "depthai/utility/RecordReplaySchema.hpp" #include "utility/Compression.hpp" #include "utility/Platform.hpp" +#ifdef DEPTHAI_ENABLE_PROTOBUF + #include + + #include "depthai/schemas/ImgFrame.pb.h" +#endif namespace dai { namespace utility { -ByteRecorder::~ByteRecorder() { - close(); +#ifdef DEPTHAI_ENABLE_PROTOBUF +// Recursively adds all `fd` dependencies to `fd_set`. 
diff --git a/src/utility/RecordReplay.cpp b/src/utility/RecordReplay.cpp
index 86f86faa9..364f97b38 100644
--- a/src/utility/RecordReplay.cpp
+++ b/src/utility/RecordReplay.cpp
@@ -9,78 +9,77 @@
 #include "Environment.hpp"
 #include "RecordReplayImpl.hpp"
 #include "build/version.hpp"
-#include "depthai/utility/RecordReplaySchema.hpp"
 #include "utility/Compression.hpp"
 #include "utility/Platform.hpp"
+#ifdef DEPTHAI_ENABLE_PROTOBUF
+    #include <google/protobuf/descriptor.pb.h>
+
+    #include "depthai/schemas/ImgFrame.pb.h"
+#endif
 
 namespace dai {
 namespace utility {
-ByteRecorder::~ByteRecorder() {
-    close();
+#ifdef DEPTHAI_ENABLE_PROTOBUF
+// Recursively adds all `fd` dependencies to `fd_set`.
+void fdSetInternal(google::protobuf::FileDescriptorSet& fd_set, std::unordered_set<std::string>& files, const google::protobuf::FileDescriptor* fd) {
+    for(int i = 0; i < fd->dependency_count(); ++i) {
+        const auto* dep = fd->dependency(i);
+        auto [_, inserted] = files.insert(dep->name());
+        if(!inserted) continue;
+        fdSetInternal(fd_set, files, fd->dependency(i));
+    }
+    fd->CopyTo(fd_set.add_file());
 }
 
-void ByteRecorder::init(const std::string& filePath, RecordConfig::CompressionLevel compressionLevel, RecordType recordingType) {
-    if(initialized) {
-        throw std::runtime_error("ByteRecorder already initialized");
-    }
-    if(filePath.empty()) {
-        throw std::runtime_error("ByteRecorder file path is empty");
-    }
-    {
-        auto options = mcap::McapWriterOptions("");
-        options.library = "depthai" + std::string(build::VERSION);
-        switch(compressionLevel) {
-            case RecordConfig::CompressionLevel::NONE:
-                options.compression = mcap::Compression::None;
-                break;
-            case RecordConfig::CompressionLevel::FASTEST:
-                options.compressionLevel = mcap::CompressionLevel::Fastest;
-                break;
-            case RecordConfig::CompressionLevel::FAST:
-                options.compressionLevel = mcap::CompressionLevel::Fast;
-                break;
-            case RecordConfig::CompressionLevel::DEFAULT:
-                options.compressionLevel = mcap::CompressionLevel::Default;
-                break;
-            case RecordConfig::CompressionLevel::SLOW:
-                options.compressionLevel = mcap::CompressionLevel::Slow;
-                break;
-            case RecordConfig::CompressionLevel::SLOWEST:
-                options.compressionLevel = mcap::CompressionLevel::Slowest;
-                break;
-        }
-        options.compression = mcap::Compression::Lz4;
-        const auto res = writer.open(filePath, options);
-        if(!res.ok()) {
-            throw std::runtime_error("Failed to open file for writing: " + res.message);
-        }
-    }
-    {
-        const char* schemaText = DEFAULT_SHEMA;
-        std::string channelName = "default";
-        switch(recordingType) {
-            case RecordType::Video:
-                channelName = "video";
-                schemaText = VIDEO_SHEMA;
-                break;
-            case RecordType::Imu:
-                channelName = "imu";
-                schemaText = IMU_SHEMA;
-                break;
-            case RecordType::Other:
-                channelName = "default";
-                schemaText = DEFAULT_SHEMA;
-                break;
-        }
-        mcap::Schema schema(channelName, "jsonschema", schemaText);
-        writer.addSchema(schema);
-        mcap::Channel channel(channelName, "json", schema.id);
-        writer.addChannel(channel);
-        channelId = channel.id;
+// Returns a serialized google::protobuf::FileDescriptorSet containing
+// the necessary google::protobuf::FileDescriptor's to describe d.
+std::string fdSet(const google::protobuf::Descriptor* d) {
+    std::string res;
+    std::unordered_set<std::string> files;
+    google::protobuf::FileDescriptorSet fd_set;
+    fdSetInternal(fd_set, files, d->file());
+    return fd_set.SerializeAsString();
+}
+
+mcap::Schema createSchema(const google::protobuf::Descriptor* d) {
+    mcap::Schema schema(d->full_name(), "protobuf", fdSet(d));
+    return schema;
+}
+#endif
+
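createSchema embeds the serialized FileDescriptorSet, so readers can decode the stream without compiled-in schemas. A minimal sketch of registering a protobuf-described channel on an mcap::McapWriter, mirroring what ByteRecorder::init does below; the channel name is illustrative:

    #include "depthai/schemas/ImgFrame.pb.h"
    #include "mcap/mcap.hpp"
    #include "utility/RecordReplayImpl.hpp"  // internal header, src/ include path

    mcap::ChannelId addImgFrameChannel(mcap::McapWriter& writer) {
        mcap::Schema schema = dai::utility::createSchema(dai::proto::img_frame::ImgFrame::descriptor());
        writer.addSchema(schema);  // serialized FileDescriptorSet travels with the file
        mcap::Channel channel("video", "protobuf", schema.id);
        writer.addChannel(channel);
        return channel.id;
    }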
+void ByteRecorder::setWriter(const std::string& filePath, RecordConfig::CompressionLevel compressionLevel) {
+    auto options = mcap::McapWriterOptions("protobuf");
+    options.library = "depthai" + std::string(build::VERSION);
+    switch(compressionLevel) {
+        case RecordConfig::CompressionLevel::NONE:
+            options.compression = mcap::Compression::None;
+            break;
+        case RecordConfig::CompressionLevel::FASTEST:
+            options.compressionLevel = mcap::CompressionLevel::Fastest;
+            break;
+        case RecordConfig::CompressionLevel::FAST:
+            options.compressionLevel = mcap::CompressionLevel::Fast;
+            break;
+        case RecordConfig::CompressionLevel::DEFAULT:
+            options.compressionLevel = mcap::CompressionLevel::Default;
+            break;
+        case RecordConfig::CompressionLevel::SLOW:
+            options.compressionLevel = mcap::CompressionLevel::Slow;
+            break;
+        case RecordConfig::CompressionLevel::SLOWEST:
+            options.compressionLevel = mcap::CompressionLevel::Slowest;
+            break;
+    }
+    options.compression = mcap::Compression::Lz4;
+    const auto res = writer.open(filePath, options);
+    if(!res.ok()) {
+        throw std::runtime_error("Failed to open file for writing: " + res.message);
     }
+}
 
-    initialized = true;
+ByteRecorder::~ByteRecorder() {
+    close();
 }
 
 void ByteRecorder::close() {
@@ -94,7 +93,7 @@ BytePlayer::~BytePlayer() {
     close();
 }
 
-void BytePlayer::init(const std::string& filePath) {
+std::string BytePlayer::init(const std::string& filePath) {
     if(initialized) {
         throw std::runtime_error("BytePlayer already initialized");
     }
@@ -108,25 +107,12 @@ void BytePlayer::init(const std::string& filePath) {
         }
     }
     messageView = std::make_unique<mcap::LinearMessageView>(reader.readMessages());
+    if(messageView->begin() == messageView->end()) {
+        throw std::runtime_error("No messages in file");
+    }
     it = std::make_unique<mcap::LinearMessageView::Iterator>(messageView->begin());
     initialized = true;
-}
-
-std::optional<nlohmann::json> BytePlayer::next() {
-    if(!initialized) {
-        throw std::runtime_error("BytePlayer not initialized");
-    }
-    if(*it == messageView->end()) return std::nullopt;
-    if((*it)->channel->messageEncoding != "json") {
-        throw std::runtime_error("Unsupported message encoding: " + (*it)->channel->messageEncoding);
-    }
-    std::string_view asString(reinterpret_cast<const char*>((*it)->message.data), (*it)->message.dataSize);
-
-    nlohmann::json j = nlohmann::json::parse(asString);
-
-    ++(*it);
-
-    return j;
+    return (*it)->schema->name;
 }
 
 void BytePlayer::restart() {
@@ -144,6 +130,7 @@ void BytePlayer::close() {
 }
 
 std::optional<std::tuple<uint32_t, uint32_t>> BytePlayer::getVideoSize(const std::string& filePath) {
+#ifdef DEPTHAI_ENABLE_PROTOBUF
     if(filePath.empty()) {
        throw std::runtime_error("File path is empty in BytePlayer::getVideoSize");
     }
@@ -159,20 +146,20 @@ std::optional<std::tuple<uint32_t, uint32_t>> BytePlayer::getVideoSize(const std
         return std::nullopt;
     } else {
         auto msg = messageView.begin();
-        if(msg->channel->messageEncoding != "json") {
+        if(msg->channel->messageEncoding != "protobuf") {
             throw std::runtime_error("Unsupported message encoding: " + msg->channel->messageEncoding);
         }
-        std::string_view asString(reinterpret_cast<const char*>(msg->message.data), msg->message.dataSize);
-        nlohmann::json j = nlohmann::json::parse(asString);
-
-        auto type = j["type"].get<utility::RecordType>();
-        if(type == utility::RecordType::Video) {
-            auto width = j["width"].get<uint32_t>();
-            auto height = j["height"].get<uint32_t>();
-            return std::make_tuple(width, height);
+        proto::img_frame::ImgFrame adatatype;
+        if(!adatatype.ParseFromArray(reinterpret_cast<const char*>(msg->message.data), msg->message.dataSize)) {
+            throw std::runtime_error("Failed to parse protobuf message");
         }
+
+        return std::make_tuple(adatatype.fb().width(), adatatype.fb().height());
     }
     return std::nullopt;
+#else
+    throw std::runtime_error("BytePlayer::getVideoSize requires protobuf support");
+#endif
 }
 
 bool checkRecordConfig(std::string& recordPath, RecordConfig& config) {
@@ -238,5 +225,11 @@ std::string matchTo(const std::vector<std::string>& deviceIds, const std::vector
     return deviceId;
 }
 
+#ifndef DEPTHAI_HAVE_OPENCV_SUPPORT
+std::tuple<uint32_t, uint32_t> getVideoSize(const std::string& filePath) {
+    throw std::runtime_error("OpenCV is required to get video size");
+}
+#endif
+
 } // namespace utility
 } // namespace dai
diff --git a/src/utility/RecordReplayImpl.hpp b/src/utility/RecordReplayImpl.hpp
index 5fa98c2f7..5b2cc1a20 100644
--- a/src/utility/RecordReplayImpl.hpp
+++ b/src/utility/RecordReplayImpl.hpp
@@ -1,8 +1,17 @@
 #include "depthai/utility/RecordReplay.hpp"
 #include "mcap/mcap.hpp"
 
+#ifdef DEPTHAI_ENABLE_PROTOBUF
+    #include <google/protobuf/descriptor.h>
+#endif
+
 namespace dai {
 namespace utility {
+
+#ifdef DEPTHAI_ENABLE_PROTOBUF
+mcap::Schema createSchema(const google::protobuf::Descriptor* d);
+#endif
+
 class VideoRecorder {
    public:
     enum class VideoCodec { H264, MJPEG, RAW };
@@ -32,21 +41,43 @@ class VideoRecorder {
 };
 
 class ByteRecorder {
+   private:
+    void setWriter(const std::string& filePath, RecordConfig::CompressionLevel compressionLevel);
+
    public:
     ~ByteRecorder();
-    void init(const std::string& filePath, RecordConfig::CompressionLevel compressionLevel, RecordType recordingType);
     template <typename T>
-    void write(const T& data) {
+    void init(const std::string& filePath, RecordConfig::CompressionLevel compressionLevel, const std::string& channelName) {
+#ifdef DEPTHAI_ENABLE_PROTOBUF
+        if(initialized) {
+            throw std::runtime_error("ByteRecorder already initialized");
+        }
+        if(filePath.empty()) {
+            throw std::runtime_error("ByteRecorder file path is empty");
+        }
+        setWriter(filePath, compressionLevel);
+        {
+            mcap::Schema schema = createSchema(T::descriptor());
+            writer.addSchema(schema);
+            mcap::Channel channel(channelName, "protobuf", schema.id);
+            writer.addChannel(channel);
+            channelId = channel.id;
+        }
+
+        initialized = true;
+#else
+        throw std::runtime_error("ByteRecorder not supported without protobuf support");
+#endif
+    }
+    void write(std::vector<uint8_t> data) {
         mcap::Timestamp writeTime =
             std::chrono::duration_cast<std::chrono::nanoseconds>(std::chrono::system_clock::now().time_since_epoch()).count();
-        nlohmann::json j = data;
-        std::string serialized = j.dump();
         mcap::Message msg;
         msg.channelId = channelId;
         msg.logTime = writeTime;
         msg.publishTime = writeTime;
         msg.sequence = index++;
-        msg.data = reinterpret_cast<const std::byte*>(serialized.data());
-        msg.dataSize = serialized.size();
+        msg.data = reinterpret_cast<const std::byte*>(data.data());
+        msg.dataSize = data.size();
         const auto res = writer.write(msg);
         if(!res.ok()) {
             writer.close();
@@ -60,8 +91,6 @@ class ByteRecorder {
 
    private:
     bool initialized = false;
-    std::ofstream file;
-
     uint64_t index = 0;
     mcap::McapWriter writer;
     mcap::ChannelId channelId;
@@ -94,8 +123,26 @@ class VideoPlayer {
 class BytePlayer {
    public:
     ~BytePlayer();
-    void init(const std::string& filePath);
-    std::optional<nlohmann::json> next();
+    std::string init(const std::string& filePath);
+    template <typename T>
+    std::optional<T> next() {
+        if(!initialized) {
std::runtime_error("BytePlayer not initialized"); + } + if(*it == messageView->end()) return std::nullopt; + if((*it)->channel->messageEncoding != "protobuf") { + throw std::runtime_error("Unsupported message encoding: " + (*it)->channel->messageEncoding); + } + + T data; + if(!data.ParseFromArray(reinterpret_cast((*it)->message.data), (*it)->message.dataSize)) { + throw std::runtime_error("Failed to parse protobuf message"); + } + + ++(*it); + + return data; + } void restart(); void close(); static std::optional> getVideoSize(const std::string& filePath); @@ -115,5 +162,8 @@ bool checkRecordConfig(std::string& recordPath, RecordConfig& config); bool allMatch(const std::vector& v1, const std::vector& v2); std::string matchTo(const std::vector& deviceIds, const std::vector& filenames, const std::vector& nodenames); + +std::tuple getVideoSize(const std::string& filePath); + } // namespace utility } // namespace dai