diff --git a/CMakeLists.txt b/CMakeLists.txt index 869bff4e1d..ff4122a052 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -273,6 +273,7 @@ set(TARGET_CORE_SOURCES src/device/CalibrationHandler.cpp src/device/Version.cpp src/pipeline/Pipeline.cpp + src/pipeline/PipelineStateApi.cpp src/pipeline/AssetManager.cpp src/pipeline/MessageQueue.cpp src/pipeline/Node.cpp @@ -281,6 +282,8 @@ set(TARGET_CORE_SOURCES src/pipeline/ThreadedHostNode.cpp src/pipeline/DeviceNode.cpp src/pipeline/DeviceNodeGroup.cpp + src/pipeline/node/internal/PipelineEventAggregation.cpp + src/pipeline/node/internal/PipelineStateMerge.cpp src/pipeline/node/internal/XLinkIn.cpp src/pipeline/node/internal/XLinkOut.cpp src/pipeline/node/ColorCamera.cpp @@ -351,6 +354,9 @@ set(TARGET_CORE_SOURCES src/pipeline/datatype/PointCloudConfig.cpp src/pipeline/datatype/ObjectTrackerConfig.cpp src/pipeline/datatype/PointCloudData.cpp + src/pipeline/datatype/PipelineEvent.cpp + src/pipeline/datatype/PipelineState.cpp + src/pipeline/datatype/PipelineEventAggregationConfig.cpp src/pipeline/datatype/RGBDData.cpp src/pipeline/datatype/MessageGroup.cpp src/pipeline/datatype/ThermalConfig.cpp @@ -367,6 +373,7 @@ set(TARGET_CORE_SOURCES src/utility/Initialization.cpp src/utility/Resources.cpp src/utility/Platform.cpp + src/utility/PipelineEventDispatcher.cpp src/utility/RecordReplay.cpp src/utility/McapImpl.cpp src/utility/Environment.cpp diff --git a/bindings/python/CMakeLists.txt b/bindings/python/CMakeLists.txt index 2a4bceee35..58209d8832 100644 --- a/bindings/python/CMakeLists.txt +++ b/bindings/python/CMakeLists.txt @@ -98,7 +98,8 @@ set(SOURCE_LIST src/pipeline/node/UVCBindings.cpp src/pipeline/node/ToFBindings.cpp src/pipeline/node/PointCloudBindings.cpp src/pipeline/node/SyncBindings.cpp + src/pipeline/node/PipelineEventAggregationBindings.cpp src/pipeline/node/MessageDemuxBindings.cpp src/pipeline/node/HostNodeBindings.cpp src/pipeline/node/RecordBindings.cpp @@ -136,6 +136,8 @@ set(SOURCE_LIST
src/pipeline/datatype/PointCloudConfigBindings.cpp src/pipeline/datatype/ObjectTrackerConfigBindings.cpp src/pipeline/datatype/PointCloudDataBindings.cpp + src/pipeline/datatype/PipelineEventBindings.cpp + src/pipeline/datatype/PipelineStateBindings.cpp src/pipeline/datatype/TransformDataBindings.cpp src/pipeline/datatype/ImageAlignConfigBindings.cpp src/pipeline/datatype/ImgAnnotationsBindings.cpp @@ -149,6 +151,7 @@ set(SOURCE_LIST src/remote_connection/RemoteConnectionBindings.cpp src/utility/EventsManagerBindings.cpp + src/utility/PipelineEventDispatcherBindings.cpp ) if(DEPTHAI_MERGED_TARGET) list(APPEND SOURCE_LIST @@ -412,4 +415,4 @@ endif() ######################## if (DEPTHAI_PYTHON_ENABLE_EXAMPLES) add_subdirectory(../../examples/python ${CMAKE_BINARY_DIR}/examples/python) -endif() \ No newline at end of file +endif() diff --git a/bindings/python/src/DatatypeBindings.cpp b/bindings/python/src/DatatypeBindings.cpp index 69d1c922a4..8276647fa2 100644 --- a/bindings/python/src/DatatypeBindings.cpp +++ b/bindings/python/src/DatatypeBindings.cpp @@ -30,6 +30,8 @@ void bind_tracklets(pybind11::module& m, void* pCallstack); void bind_benchmarkreport(pybind11::module& m, void* pCallstack); void bind_pointcloudconfig(pybind11::module& m, void* pCallstack); void bind_pointclouddata(pybind11::module& m, void* pCallstack); +void bind_pipelineevent(pybind11::module& m, void* pCallstack); +void bind_pipelinestate(pybind11::module& m, void* pCallstack); void bind_transformdata(pybind11::module& m, void* pCallstack); void bind_rgbddata(pybind11::module& m, void* pCallstack); void bind_imagealignconfig(pybind11::module& m, void* pCallstack); @@ -71,6 +73,8 @@ void DatatypeBindings::addToCallstack(std::deque& callstack) { callstack.push_front(bind_benchmarkreport); callstack.push_front(bind_pointcloudconfig); callstack.push_front(bind_pointclouddata); + callstack.push_front(bind_pipelineevent); + callstack.push_front(bind_pipelinestate); 
callstack.push_front(bind_transformdata); callstack.push_front(bind_imagealignconfig); callstack.push_front(bind_imageannotations); @@ -131,5 +135,7 @@ void DatatypeBindings::bind(pybind11::module& m, void* pCallstack) { .value("PointCloudData", DatatypeEnum::PointCloudData) .value("ImageAlignConfig", DatatypeEnum::ImageAlignConfig) .value("ImgAnnotations", DatatypeEnum::ImgAnnotations) - .value("RGBDData", DatatypeEnum::RGBDData); + .value("RGBDData", DatatypeEnum::RGBDData) + .value("PipelineEvent", DatatypeEnum::PipelineEvent) + .value("PipelineState", DatatypeEnum::PipelineState); } diff --git a/bindings/python/src/pipeline/PipelineBindings.cpp b/bindings/python/src/pipeline/PipelineBindings.cpp index 02d10b3bd0..f2b1e6ee8c 100644 --- a/bindings/python/src/pipeline/PipelineBindings.cpp +++ b/bindings/python/src/pipeline/PipelineBindings.cpp @@ -64,6 +64,9 @@ void PipelineBindings::bind(pybind11::module& m, void* pCallstack) { py::class_ globalProperties(m, "GlobalProperties", DOC(dai, GlobalProperties)); py::class_ recordConfig(m, "RecordConfig", DOC(dai, RecordConfig)); py::class_ recordVideoConfig(recordConfig, "VideoEncoding", DOC(dai, RecordConfig, VideoEncoding)); + py::class_ pipelineStateApi(m, "PipelineStateApi", DOC(dai, PipelineStateApi)); + py::class_ nodesStateApi(m, "NodesStateApi", DOC(dai, NodesStateApi)); + py::class_ nodeStateApi(m, "NodeStateApi", DOC(dai, NodeStateApi)); py::class_ pipeline(m, "Pipeline", DOC(dai, Pipeline, 2)); /////////////////////////////////////////////////////////////////////// @@ -102,6 +105,59 @@ void PipelineBindings::bind(pybind11::module& m, void* pCallstack) { .def_readwrite("videoEncoding", &RecordConfig::videoEncoding, DOC(dai, RecordConfig, videoEncoding)) .def_readwrite("compressionLevel", &RecordConfig::compressionLevel, DOC(dai, RecordConfig, compressionLevel)); + pipelineStateApi.def("nodes", static_cast(&PipelineStateApi::nodes), DOC(dai, PipelineStateApi, nodes)) + .def("nodes", + 
static_cast&)>(&PipelineStateApi::nodes), + py::arg("nodeIds"), + DOC(dai, PipelineStateApi, nodes, 2)) + .def("nodes", + static_cast(&PipelineStateApi::nodes), + py::arg("nodeId"), + DOC(dai, PipelineStateApi, nodes, 3)); + + nodesStateApi.def("summary", &NodesStateApi::summary, DOC(dai, NodesStateApi, summary)) + .def("detailed", &NodesStateApi::detailed, DOC(dai, NodesStateApi, detailed)) + .def("outputs", &NodesStateApi::outputs, DOC(dai, NodesStateApi, outputs)) + .def("inputs", &NodesStateApi::inputs, DOC(dai, NodesStateApi, inputs)) + .def("otherTimings", &NodesStateApi::otherTimings, DOC(dai, NodesStateApi, otherTimings)); + + nodeStateApi.def("summary", &NodeStateApi::summary, DOC(dai, NodeStateApi, summary)) + .def("detailed", &NodeStateApi::detailed, DOC(dai, NodeStateApi, detailed)) + .def("outputs", + static_cast (NodeStateApi::*)()>(&NodeStateApi::outputs), + DOC(dai, NodeStateApi, outputs)) + .def("outputs", + static_cast (NodeStateApi::*)(const std::vector&)>( + &NodeStateApi::outputs), + py::arg("outputNames"), + DOC(dai, NodeStateApi, outputs, 2)) + .def("outputs", + static_cast(&NodeStateApi::outputs), + py::arg("outputName"), + DOC(dai, NodeStateApi, outputs, 3)) + .def("inputs", + static_cast (NodeStateApi::*)()>(&NodeStateApi::inputs), + DOC(dai, NodeStateApi, inputs)) + .def("inputs", + static_cast (NodeStateApi::*)(const std::vector&)>(&NodeStateApi::inputs), + py::arg("inputNames"), + DOC(dai, NodeStateApi, inputs, 2)) + .def("inputs", + static_cast(&NodeStateApi::inputs), + py::arg("inputName"), + DOC(dai, NodeStateApi, inputs, 3)) + .def("otherTimings", + static_cast (NodeStateApi::*)()>(&NodeStateApi::otherTimings), + DOC(dai, NodeStateApi, otherTimings)) + .def("otherTimings", + static_cast (NodeStateApi::*)(const std::vector&)>(&NodeStateApi::otherTimings), + py::arg("statNames"), + DOC(dai, NodeStateApi, otherTimings, 2)) + .def("otherStats", + static_cast(&NodeStateApi::otherStats), + py::arg("statName"), + DOC(dai, NodeStateApi, 
otherStats)); + // bind pipeline pipeline .def(py::init([](bool createImplicitDevice) { diff --git a/bindings/python/src/pipeline/datatype/PipelineEventBindings.cpp b/bindings/python/src/pipeline/datatype/PipelineEventBindings.cpp new file mode 100644 index 0000000000..23435044f5 --- /dev/null +++ b/bindings/python/src/pipeline/datatype/PipelineEventBindings.cpp @@ -0,0 +1,59 @@ +#include +#include + +#include "DatatypeBindings.hpp" +#include "pipeline/CommonBindings.hpp" + +// depthai +#include "depthai/pipeline/datatype/PipelineEvent.hpp" + +// pybind +#include +#include + +// #include "spdlog/spdlog.h" + +void bind_pipelineevent(pybind11::module& m, void* pCallstack) { + using namespace dai; + + py::class_, Buffer, std::shared_ptr> pipelineEvent(m, "PipelineEvent", DOC(dai, PipelineEvent)); + py::enum_ pipelineEventType(pipelineEvent, "Type", DOC(dai, PipelineEvent, Type)); + py::enum_ pipelineEventInterval(pipelineEvent, "Interval", DOC(dai, PipelineEvent, Interval)); + + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + // Call the rest of the type defines, then perform the actual bindings + Callstack* callstack = (Callstack*)pCallstack; + auto cb = callstack->top(); + callstack->pop(); + cb(m, pCallstack); + // Actual bindings + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + + pipelineEventType.value("CUSTOM", PipelineEvent::Type::CUSTOM) + .value("LOOP", PipelineEvent::Type::LOOP) + .value("INPUT", PipelineEvent::Type::INPUT) + .value("OUTPUT", PipelineEvent::Type::OUTPUT); + pipelineEventInterval.value("NONE", PipelineEvent::Interval::NONE) + .value("START", PipelineEvent::Interval::START) + .value("END", 
PipelineEvent::Interval::END); + + // Message + pipelineEvent.def(py::init<>()) + .def("__repr__", &PipelineEvent::str) + .def_readwrite("nodeId", &PipelineEvent::nodeId, DOC(dai, PipelineEvent, nodeId)) + .def_readwrite("status", &PipelineEvent::status, DOC(dai, PipelineEvent, status)) + .def_readwrite("queueSize", &PipelineEvent::queueSize, DOC(dai, PipelineEvent, queueSize)) + .def_readwrite("interval", &PipelineEvent::interval, DOC(dai, PipelineEvent, interval)) + .def_readwrite("type", &PipelineEvent::type, DOC(dai, PipelineEvent, type)) + .def_readwrite("source", &PipelineEvent::source, DOC(dai, PipelineEvent, source)) + .def("getTimestamp", &PipelineEvent::Buffer::getTimestamp, DOC(dai, Buffer, getTimestamp)) + .def("getTimestampDevice", &PipelineEvent::Buffer::getTimestampDevice, DOC(dai, Buffer, getTimestampDevice)) + .def("getSequenceNum", &PipelineEvent::Buffer::getSequenceNum, DOC(dai, Buffer, getSequenceNum)) + .def("setTimestamp", &PipelineEvent::setTimestamp, DOC(dai, Buffer, setTimestamp)) + .def("setTimestampDevice", &PipelineEvent::setTimestampDevice, DOC(dai, Buffer, setTimestampDevice)) + .def("setSequenceNum", &PipelineEvent::setSequenceNum, DOC(dai, Buffer, setSequenceNum)); +} diff --git a/bindings/python/src/pipeline/datatype/PipelineStateBindings.cpp b/bindings/python/src/pipeline/datatype/PipelineStateBindings.cpp new file mode 100644 index 0000000000..bb42da24b2 --- /dev/null +++ b/bindings/python/src/pipeline/datatype/PipelineStateBindings.cpp @@ -0,0 +1,112 @@ +#include + +#include "DatatypeBindings.hpp" + +// depthai +#include "depthai/pipeline/datatype/PipelineState.hpp" + +// pybind +#include +#include + +// #include "spdlog/spdlog.h" + +void bind_pipelinestate(pybind11::module& m, void* pCallstack) { + using namespace dai; + + py::class_ nodeState(m, "NodeState", DOC(dai, NodeState)); + py::class_ durationEvent(nodeState, "DurationEvent", DOC(dai, NodeState, DurationEvent)); + py::class_ nodeStateTimingStats(nodeState, 
"TimingStats", DOC(dai, NodeState, TimingStats)); + py::class_ nodeStateTiming(nodeState, "Timing", DOC(dai, NodeState, Timing)); + py::class_ nodeStateQueueStats(nodeState, "QueueStats", DOC(dai, NodeState, QueueStats)); + py::class_ nodeStateInputQueueState(nodeState, "InputQueueState", DOC(dai, NodeState, InputQueueState)); + py::class_ nodeStateOutputQueueState(nodeState, "OutputQueueState", DOC(dai, NodeState, OutputQueueState)); + py::class_, Buffer, std::shared_ptr> pipelineState(m, "PipelineState", DOC(dai, PipelineState)); + + py::enum_(nodeStateInputQueueState, "State", DOC(dai, NodeState, InputQueueState, State)) + .value("IDLE", NodeState::InputQueueState::State::IDLE) + .value("WAITING", NodeState::InputQueueState::State::WAITING) + .value("BLOCKED", NodeState::InputQueueState::State::BLOCKED); + py::enum_(nodeStateOutputQueueState, "State", DOC(dai, NodeState, OutputQueueState, State)) + .value("IDLE", NodeState::OutputQueueState::State::IDLE) + .value("SENDING", NodeState::OutputQueueState::State::SENDING); + py::enum_(nodeState, "State", DOC(dai, NodeState, State)) + .value("IDLE", NodeState::State::IDLE) + .value("GETTING_INPUTS", NodeState::State::GETTING_INPUTS) + .value("PROCESSING", NodeState::State::PROCESSING) + .value("SENDING_OUTPUTS", NodeState::State::SENDING_OUTPUTS); + + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + // Call the rest of the type defines, then perform the actual bindings + Callstack* callstack = (Callstack*)pCallstack; + auto cb = callstack->top(); + callstack->pop(); + cb(m, pCallstack); + // Actual bindings + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + + 
durationEvent.def(py::init<>()) + .def("__repr__", &NodeState::DurationEvent::str) + .def_readwrite("startEvent", &NodeState::DurationEvent::startEvent, DOC(dai, NodeState, DurationEvent, startEvent)) + .def_readwrite("durationUs", &NodeState::DurationEvent::durationUs, DOC(dai, NodeState, DurationEvent, durationUs)); + + nodeStateTimingStats.def(py::init<>()) + .def("__repr__", &NodeState::TimingStats::str) + .def_readwrite("minMicros", &NodeState::TimingStats::minMicros, DOC(dai, NodeState, TimingStats, minMicros)) + .def_readwrite("maxMicros", &NodeState::TimingStats::maxMicros, DOC(dai, NodeState, TimingStats, maxMicros)) + .def_readwrite("averageMicrosRecent", &NodeState::TimingStats::averageMicrosRecent, DOC(dai, NodeState, TimingStats, averageMicrosRecent)) + .def_readwrite("stdDevMicrosRecent", &NodeState::TimingStats::stdDevMicrosRecent, DOC(dai, NodeState, TimingStats, stdDevMicrosRecent)) + .def_readwrite("minMicrosRecent", &NodeState::TimingStats::minMicrosRecent, DOC(dai, NodeState, TimingStats, minMicrosRecent)) + .def_readwrite("maxMicrosRecent", &NodeState::TimingStats::maxMicrosRecent, DOC(dai, NodeState, TimingStats, maxMicrosRecent)) + .def_readwrite("medianMicrosRecent", &NodeState::TimingStats::medianMicrosRecent, DOC(dai, NodeState, TimingStats, medianMicrosRecent)); + + nodeStateTiming.def(py::init<>()) + .def("__repr__", &NodeState::Timing::str) + .def_readwrite("fps", &NodeState::Timing::fps, DOC(dai, NodeState, Timing, fps)) + .def_readwrite("durationStats", &NodeState::Timing::durationStats, DOC(dai, NodeState, Timing, durationStats)); + + nodeStateQueueStats.def(py::init<>()) + .def("__repr__", &NodeState::QueueStats::str) + .def_readwrite("maxQueued", &NodeState::QueueStats::maxQueued, DOC(dai, NodeState, QueueStats, maxQueued)) + .def_readwrite("minQueuedRecent", &NodeState::QueueStats::minQueuedRecent, DOC(dai, NodeState, QueueStats, minQueuedRecent)) + .def_readwrite("maxQueuedRecent", &NodeState::QueueStats::maxQueuedRecent, 
DOC(dai, NodeState, QueueStats, maxQueuedRecent)) + .def_readwrite("medianQueuedRecent", &NodeState::QueueStats::medianQueuedRecent, DOC(dai, NodeState, QueueStats, medianQueuedRecent)); + + nodeStateInputQueueState.def(py::init<>()) + .def("__repr__", &NodeState::InputQueueState::str) + .def_readwrite("state", &NodeState::InputQueueState::state, DOC(dai, NodeState, InputQueueState, state)) + .def_readwrite("numQueued", &NodeState::InputQueueState::numQueued, DOC(dai, NodeState, InputQueueState, numQueued)) + .def_readwrite("timing", &NodeState::InputQueueState::timing, DOC(dai, NodeState, InputQueueState, timing)) + .def_readwrite("queueStats", &NodeState::InputQueueState::queueStats, DOC(dai, NodeState, InputQueueState, queueStats)); + + nodeStateOutputQueueState.def(py::init<>()) + .def("__repr__", &NodeState::OutputQueueState::str) + .def_readwrite("state", &NodeState::OutputQueueState::state, DOC(dai, NodeState, OutputQueueState, state)) + .def_readwrite("timing", &NodeState::OutputQueueState::timing, DOC(dai, NodeState, OutputQueueState, timing)); + + nodeState.def(py::init<>()) + .def("__repr__", &NodeState::str) + .def_readwrite("state", &NodeState::state, DOC(dai, NodeState, state)) + .def_readwrite("events", &NodeState::events, DOC(dai, NodeState, events)) + .def_readwrite("inputStates", &NodeState::inputStates, DOC(dai, NodeState, inputStates)) + .def_readwrite("outputStates", &NodeState::outputStates, DOC(dai, NodeState, outputStates)) + .def_readwrite("inputsGetTiming", &NodeState::inputsGetTiming, DOC(dai, NodeState, inputsGetTiming)) + .def_readwrite("outputsSendTiming", &NodeState::outputsSendTiming, DOC(dai, NodeState, outputsSendTiming)) + .def_readwrite("mainLoopTiming", &NodeState::mainLoopTiming, DOC(dai, NodeState, mainLoopTiming)) + .def_readwrite("otherTimings", &NodeState::otherTimings, DOC(dai, NodeState, otherTimings)); + + // Message + pipelineState.def(py::init<>()) + .def("__repr__", &PipelineState::str) + .def_readwrite("nodeStates", 
&PipelineState::nodeStates, DOC(dai, PipelineState, nodeStates)) + .def("getTimestamp", &PipelineState::Buffer::getTimestamp, DOC(dai, Buffer, getTimestamp)) + .def("getTimestampDevice", &PipelineState::Buffer::getTimestampDevice, DOC(dai, Buffer, getTimestampDevice)) + .def("getSequenceNum", &PipelineState::Buffer::getSequenceNum, DOC(dai, Buffer, getSequenceNum)) + .def("setTimestamp", &PipelineState::setTimestamp, DOC(dai, Buffer, setTimestamp)) + .def("setTimestampDevice", &PipelineState::setTimestampDevice, DOC(dai, Buffer, setTimestampDevice)) + .def("setSequenceNum", &PipelineState::setSequenceNum, DOC(dai, Buffer, setSequenceNum)); +} diff --git a/bindings/python/src/pipeline/node/NodeBindings.cpp b/bindings/python/src/pipeline/node/NodeBindings.cpp index d58ac38a99..715c689d07 100644 --- a/bindings/python/src/pipeline/node/NodeBindings.cpp +++ b/bindings/python/src/pipeline/node/NodeBindings.cpp @@ -153,6 +153,7 @@ void bind_uvc(pybind11::module& m, void* pCallstack); void bind_thermal(pybind11::module& m, void* pCallstack); void bind_tof(pybind11::module& m, void* pCallstack); void bind_pointcloud(pybind11::module& m, void* pCallstack); +void bind_pipelineeventaggregation(pybind11::module& m, void* pCallstack); void bind_sync(pybind11::module& m, void* pCallstack); void bind_messagedemux(pybind11::module& m, void* pCallstack); void bind_hostnode(pybind11::module& m, void* pCallstack); @@ -202,6 +203,7 @@ void NodeBindings::addToCallstack(std::deque& callstack) { callstack.push_front(bind_thermal); callstack.push_front(bind_tof); callstack.push_front(bind_pointcloud); + callstack.push_front(bind_pipelineeventaggregation); callstack.push_front(bind_sync); callstack.push_front(bind_messagedemux); callstack.push_front(bind_hostnode); @@ -445,6 +447,7 @@ void NodeBindings::bind(pybind11::module& m, void* pCallstack) { .def("error", [](dai::ThreadedNode& node, const std::string& msg) { node.pimpl->logger->error(msg); }) .def("critical", [](dai::ThreadedNode& 
node, const std::string& msg) { node.pimpl->logger->critical(msg); }) .def("isRunning", &ThreadedNode::isRunning, DOC(dai, ThreadedNode, isRunning)) + .def("mainLoop", &ThreadedNode::mainLoop, DOC(dai, ThreadedNode, mainLoop)) .def("setLogLevel", &ThreadedNode::setLogLevel, DOC(dai, ThreadedNode, setLogLevel)) .def("getLogLevel", &ThreadedNode::getLogLevel, DOC(dai, ThreadedNode, getLogLevel)); } diff --git a/bindings/python/src/pipeline/node/PipelineEventAggregationBindings.cpp b/bindings/python/src/pipeline/node/PipelineEventAggregationBindings.cpp new file mode 100644 index 0000000000..aa52d802a9 --- /dev/null +++ b/bindings/python/src/pipeline/node/PipelineEventAggregationBindings.cpp @@ -0,0 +1,37 @@ +#include "Common.hpp" +#include "depthai/pipeline/node/internal/PipelineEventAggregation.hpp" +#include "depthai/properties/internal/PipelineEventAggregationProperties.hpp" + +void bind_pipelineeventaggregation(pybind11::module& m, void* pCallstack) { + // using namespace dai; + // using namespace dai::node::internal; + // + // // Node and Properties declare upfront + // py::class_ pipelineEventAggregationProperties( + // m, "PipelineEventAggregationProperties", DOC(dai, PipelineEventAggregationProperties)); + // auto pipelineEventAggregation = ADD_NODE(PipelineEventAggregation); + // + // /////////////////////////////////////////////////////////////////////// + // /////////////////////////////////////////////////////////////////////// + // /////////////////////////////////////////////////////////////////////// + // // Call the rest of the type defines, then perform the actual bindings + Callstack* callstack = (Callstack*)pCallstack; + auto cb = callstack->top(); + callstack->pop(); + cb(m, pCallstack); + // // Actual bindings + // /////////////////////////////////////////////////////////////////////// + // /////////////////////////////////////////////////////////////////////// + // 
/////////////////////////////////////////////////////////////////////// + // + // // Properties + // pipelineEventAggregationProperties.def_readwrite("aggregationWindowSize", &PipelineEventAggregationProperties::aggregationWindowSize) + // .def_readwrite("eventBatchSize", &PipelineEventAggregationProperties::eventBatchSize); + // + // // Node + // pipelineEventAggregation.def_readonly("out", &PipelineEventAggregation::out, DOC(dai, node, PipelineEventAggregation, out)) + // .def_readonly("inputs", &PipelineEventAggregation::inputs, DOC(dai, node, PipelineEventAggregation, inputs)) + // .def("setRunOnHost", &PipelineEventAggregation::setRunOnHost, py::arg("runOnHost"), DOC(dai, node, PipelineEventAggregation, setRunOnHost)) + // .def("runOnHost", &PipelineEventAggregation::runOnHost, DOC(dai, node, PipelineEventAggregation, runOnHost)); + // daiNodeModule.attr("PipelineEventAggregation").attr("Properties") = pipelineEventAggregationProperties; +} diff --git a/bindings/python/src/py_bindings.cpp b/bindings/python/src/py_bindings.cpp index 122d030163..cbbb4400e1 100644 --- a/bindings/python/src/py_bindings.cpp +++ b/bindings/python/src/py_bindings.cpp @@ -37,6 +37,7 @@ #include "pipeline/node/NodeBindings.hpp" #include "remote_connection/RemoteConnectionBindings.hpp" #include "utility/EventsManagerBindings.hpp" +#include "utility/PipelineEventDispatcherBindings.hpp" #ifdef DEPTHAI_HAVE_OPENCV_SUPPORT #include #endif @@ -87,6 +88,7 @@ PYBIND11_MODULE(depthai, m) callstack.push_front(&CalibrationHandlerBindings::bind); callstack.push_front(&ZooBindings::bind); callstack.push_front(&EventsManagerBindings::bind); + callstack.push_front(&PipelineEventDispatcherBindings::bind); callstack.push_front(&RemoteConnectionBindings::bind); callstack.push_front(&FilterParamsBindings::bind); // end of the callstack diff --git a/bindings/python/src/utility/PipelineEventDispatcherBindings.cpp b/bindings/python/src/utility/PipelineEventDispatcherBindings.cpp new file mode 100644 index 
0000000000..5fccc05057 --- /dev/null +++ b/bindings/python/src/utility/PipelineEventDispatcherBindings.cpp @@ -0,0 +1,30 @@ +#include "PipelineEventDispatcherBindings.hpp" +#include "depthai/utility/PipelineEventDispatcher.hpp" + +void PipelineEventDispatcherBindings::bind(pybind11::module& m, void* pCallstack) { + using namespace dai; + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + // Call the rest of the type defines, then perform the actual bindings + Callstack* callstack = (Callstack*)pCallstack; + auto cb = callstack->top(); + callstack->pop(); + cb(m, pCallstack); + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + + using namespace dai::utility; + auto pipelineEventDispatcher = py::class_(m, "PipelineEventDispatcher"); + + pipelineEventDispatcher + .def(py::init(), py::arg("output")) + .def("setNodeId", &PipelineEventDispatcher::setNodeId, py::arg("id"), DOC(dai, utility, PipelineEventDispatcher, setNodeId)) + .def("startCustomEvent", &PipelineEventDispatcher::startCustomEvent, py::arg("source"), DOC(dai, utility, PipelineEventDispatcher, startCustomEvent)) + .def("endCustomEvent", &PipelineEventDispatcher::endCustomEvent, py::arg("source"), DOC(dai, utility, PipelineEventDispatcher, endCustomEvent)) + .def("pingCustomEvent", &PipelineEventDispatcher::pingCustomEvent, py::arg("source"), DOC(dai, utility, PipelineEventDispatcher, pingCustomEvent)) + .def("inputBlockEvent", &PipelineEventDispatcher::inputBlockEvent, py::arg("source") = "defaultInputGroup", DOC(dai, utility, PipelineEventDispatcher, inputBlockEvent)) + .def("outputBlockEvent", &PipelineEventDispatcher::outputBlockEvent, py::arg("source") = 
"defaultOutputGroup", DOC(dai, utility, PipelineEventDispatcher, outputBlockEvent)) + .def("customBlockEvent", &PipelineEventDispatcher::customBlockEvent, py::arg("source"), DOC(dai, utility, PipelineEventDispatcher, customBlockEvent)); +} diff --git a/bindings/python/src/utility/PipelineEventDispatcherBindings.hpp b/bindings/python/src/utility/PipelineEventDispatcherBindings.hpp new file mode 100644 index 0000000000..36e074193e --- /dev/null +++ b/bindings/python/src/utility/PipelineEventDispatcherBindings.hpp @@ -0,0 +1,8 @@ +#pragma once + +// pybind +#include "pybind11_common.hpp" + +struct PipelineEventDispatcherBindings { + static void bind(pybind11::module& m, void* pCallstack); +}; diff --git a/bindings/python/tests/messsage_queue_test.py b/bindings/python/tests/messsage_queue_test.py index 120c87b2e9..0334ca52f6 100644 --- a/bindings/python/tests/messsage_queue_test.py +++ b/bindings/python/tests/messsage_queue_test.py @@ -387,7 +387,7 @@ def __init__(self, name: str): self.output = self.createOutput() def run(self): - while self.isRunning(): + while self.mainLoop(): buffer = self.input.get() self.output.send(buffer) @@ -399,7 +399,7 @@ def __init__(self, name: str): self.output = self.createOutput() def run(self): - while self.isRunning(): + while self.mainLoop(): buffer = dai.Buffer() self.output.send(buffer) time.sleep(0.001) diff --git a/cmake/Depthai/DepthaiDeviceRVC4Config.cmake b/cmake/Depthai/DepthaiDeviceRVC4Config.cmake index 1e26e9d084..380cdd5e95 100644 --- a/cmake/Depthai/DepthaiDeviceRVC4Config.cmake +++ b/cmake/Depthai/DepthaiDeviceRVC4Config.cmake @@ -3,4 +3,4 @@ set(DEPTHAI_DEVICE_RVC4_MATURITY "snapshot") # "version if applicable" -set(DEPTHAI_DEVICE_RVC4_VERSION "0.0.1+cd0fc231d8c860acf11ab424109d7e42500626ae") +set(DEPTHAI_DEVICE_RVC4_VERSION "0.0.1+e19591198e276d1580a98f77b3387c1abc881042") diff --git a/examples/cpp/AprilTags/april_tags_replay.cpp b/examples/cpp/AprilTags/april_tags_replay.cpp index c03958fa4c..71779178d5 100644 --- 
a/examples/cpp/AprilTags/april_tags_replay.cpp +++ b/examples/cpp/AprilTags/april_tags_replay.cpp @@ -27,7 +27,7 @@ class ImageReplay : public dai::NodeCRTP { return; } - while(isRunning()) { + while(mainLoop()) { // Read the frame from the camera cv::Mat frame; if(!cap.read(frame)) { @@ -96,4 +96,4 @@ int main() { pipeline.wait(); return 0; -} \ No newline at end of file +} diff --git a/examples/cpp/HostNodes/image_manip_host.cpp b/examples/cpp/HostNodes/image_manip_host.cpp index ba1d5df8c5..8688237e1e 100644 --- a/examples/cpp/HostNodes/image_manip_host.cpp +++ b/examples/cpp/HostNodes/image_manip_host.cpp @@ -5,11 +5,14 @@ #include "depthai/pipeline/node/host/Display.hpp" #include "depthai/pipeline/node/host/Replay.hpp" +#ifndef VIDEO_PATH +#define VIDEO_PATH "" +#endif + int main(int argc, char** argv) { - if(argc <= 1) { - std::cout << "Video parameter is missing" << std::endl; - std::cout << "Usage: ./image_manip_host video_path" << std::endl; - return -1; + std::string videoFile = VIDEO_PATH; + if(argc > 1) { + videoFile = argv[1]; } dai::Pipeline pipeline(false); @@ -27,7 +30,7 @@ int main(int argc, char** argv) { manip->initialConfig->addFlipVertical(); manip->initialConfig->setFrameType(dai::ImgFrame::Type::RGB888p); - replay->setReplayVideoFile(argv[1]); + replay->setReplayVideoFile(videoFile); replay->setOutFrameType(dai::ImgFrame::Type::NV12); replay->setFps(30); replay->setSize(1280, 720); @@ -37,5 +40,13 @@ int main(int argc, char** argv) { pipeline.start(); - pipeline.wait(); + // TODO remove before merge + while(pipeline.isRunning()) { + std::this_thread::sleep_for(std::chrono::milliseconds(5000)); + std::cout << "Pipeline state: " << pipeline.getPipelineState().nodes().detailed().str() << std::endl; + } + + pipeline.stop(); + // + // pipeline.wait(); } diff --git a/examples/cpp/HostNodes/threaded_host_node.cpp b/examples/cpp/HostNodes/threaded_host_node.cpp index 90b95560cd..60c02c0080 100644 --- a/examples/cpp/HostNodes/threaded_host_node.cpp 
+++ b/examples/cpp/HostNodes/threaded_host_node.cpp @@ -10,7 +10,7 @@ class TestPassthrough : public dai::node::CustomThreadedNode { Output output = dai::Node::Output{*this, {}}; void run() override { - while(isRunning()) { + while(mainLoop()) { auto buffer = input.get(); if(buffer) { std::cout << "The passthrough node received a buffer!" << std::endl; @@ -25,7 +25,7 @@ class TestSink : public dai::node::CustomThreadedNode { Input input = dai::Node::Input{*this, {}}; void run() override { - while(isRunning()) { + while(mainLoop()) { auto buffer = input.get(); if(buffer) { std::cout << "The sink node received a buffer!" << std::endl; @@ -39,7 +39,7 @@ class TestSource : public dai::node::CustomThreadedNode { Output output = dai::Node::Output{*this, {}}; void run() override { - while(isRunning()) { + while(mainLoop()) { auto buffer = std::make_shared(); std::cout << "The source node is sending a buffer!" << std::endl; output.send(buffer); diff --git a/examples/cpp/ObjectTracker/object_tracker_replay.cpp b/examples/cpp/ObjectTracker/object_tracker_replay.cpp index 7a55868262..6b44ae83cc 100644 --- a/examples/cpp/ObjectTracker/object_tracker_replay.cpp +++ b/examples/cpp/ObjectTracker/object_tracker_replay.cpp @@ -37,16 +37,33 @@ int main() { // Start pipeline pipeline.start(); + // TODO remove before merge + nlohmann::json j; + j["pipeline"] = pipeline.getPipelineSchema(); + std::cout << "Pipeline schema: " << j.dump(2) << std::endl; + // + // FPS calculation variables auto startTime = std::chrono::steady_clock::now(); int counter = 0; float fps = 0; cv::Scalar color(255, 255, 255); + // TODO remove before merge + int index = 0; + // + while(pipeline.isRunning()) { auto imgFrame = preview->get(); auto track = tracklets->get(); + // TODO remove before merge + if(index++ % 30 == 0) { + std::cout << "----------------------------------------" << std::endl; + std::cout << "Pipeline state: " << pipeline.getPipelineState().nodes().detailed().str() << std::endl; + } + // + 
counter++; auto currentTime = std::chrono::steady_clock::now(); auto elapsed = std::chrono::duration_cast(currentTime - startTime).count(); diff --git a/examples/cpp/RGBD/rgbd.cpp b/examples/cpp/RGBD/rgbd.cpp index 59369862ab..d4f73f2734 100644 --- a/examples/cpp/RGBD/rgbd.cpp +++ b/examples/cpp/RGBD/rgbd.cpp @@ -15,7 +15,7 @@ class RerunNode : public dai::NodeCRTP { const auto rec = rerun::RecordingStream("rerun"); rec.spawn().exit_on_failure(); rec.log_static("world", rerun::ViewCoordinates::RDF); - while(isRunning()) { + while(mainLoop()) { auto pclIn = inputPCL.get(); auto rgbdIn = inputRGBD.get(); if(pclIn != nullptr) { diff --git a/examples/cpp/RGBD/rgbd_pcl_processing.cpp b/examples/cpp/RGBD/rgbd_pcl_processing.cpp index 4f6caa1625..4bba9e3d98 100644 --- a/examples/cpp/RGBD/rgbd_pcl_processing.cpp +++ b/examples/cpp/RGBD/rgbd_pcl_processing.cpp @@ -16,7 +16,7 @@ class CustomPCLProcessingNode : public dai::NodeCRTP(); auto pclOut = std::make_shared(); if(pclIn != nullptr) { diff --git a/examples/cpp/RVC2/VSLAM/rerun_node.hpp b/examples/cpp/RVC2/VSLAM/rerun_node.hpp index dcea0fd91b..0c7fd1a41e 100644 --- a/examples/cpp/RVC2/VSLAM/rerun_node.hpp +++ b/examples/cpp/RVC2/VSLAM/rerun_node.hpp @@ -41,7 +41,7 @@ class RerunNode : public dai::NodeCRTP { rec.spawn().exit_on_failure(); rec.log_static("world", rerun::ViewCoordinates::FLU); rec.log("world/ground", rerun::Boxes3D::from_half_sizes({{3.f, 3.f, 0.00001f}})); - while(isRunning()) { + while(mainLoop()) { std::shared_ptr transData = inputTrans.get(); auto imgFrame = inputImg.get(); if(!intrinsicsSet) { @@ -107,4 +107,4 @@ class RerunNode : public dai::NodeCRTP { float fx = 400.0; float fy = 400.0; bool intrinsicsSet = false; -}; \ No newline at end of file +}; diff --git a/examples/python/AprilTags/april_tags_replay.py b/examples/python/AprilTags/april_tags_replay.py index 01cae83a97..fe6dec7f26 100644 --- a/examples/python/AprilTags/april_tags_replay.py +++ b/examples/python/AprilTags/april_tags_replay.py @@ 
-24,7 +24,7 @@ def __init__(self): imgFrame.setType(dai.ImgFrame.Type.GRAY8) self.imgFrame = imgFrame def run(self): - while self.isRunning(): + while self.mainLoop(): self.output.send(self.imgFrame) time.sleep(0.03) diff --git a/examples/python/HostNodes/host_camera.py b/examples/python/HostNodes/host_camera.py index fd458b84b8..83dd78089a 100644 --- a/examples/python/HostNodes/host_camera.py +++ b/examples/python/HostNodes/host_camera.py @@ -13,7 +13,7 @@ def run(self): if not cap.isOpened(): p.stop() raise RuntimeError("Error: Couldn't open host camera") - while self.isRunning(): + while self.mainLoop(): # Read the frame from the camera ret, frame = cap.read() if not ret: @@ -40,4 +40,4 @@ def run(self): key = cv2.waitKey(1) if key == ord('q'): p.stop() - break \ No newline at end of file + break diff --git a/examples/python/HostNodes/threaded_host_nodes.py b/examples/python/HostNodes/threaded_host_nodes.py index 7b3c508279..4118fd7887 100644 --- a/examples/python/HostNodes/threaded_host_nodes.py +++ b/examples/python/HostNodes/threaded_host_nodes.py @@ -31,7 +31,7 @@ def onStop(self): print("Goodbye from", self.name) def run(self): - while self.isRunning(): + while self.mainLoop(): buffer = self.input.get() print("The passthrough node received a buffer!") self.output.send(buffer) @@ -47,7 +47,7 @@ def onStart(self): print("Hello, this is", self.name) def run(self): - while self.isRunning(): + while self.mainLoop(): buffer = self.input.get() del buffer print(f"{self.name} node received a buffer!") @@ -59,7 +59,7 @@ def __init__(self, name: str): self.output = self.createOutput() def run(self): - while self.isRunning(): + while self.mainLoop(): buffer = dai.Buffer() print(f"{self.name} node is sending a buffer!") self.output.send(buffer) @@ -77,7 +77,7 @@ def __init__(self, name: str): self.passthrough1.output.link(self.passthrough2.input) def run(self): - while self.isRunning(): + while self.mainLoop(): buffer = self.input.get() self.output.send(buffer) @@ 
-105,4 +105,4 @@ def run(self): p.start() while p.isRunning(): time.sleep(1) - print("Pipeline is running...") \ No newline at end of file + print("Pipeline is running...") diff --git a/examples/python/RGBD/rgbd.py b/examples/python/RGBD/rgbd.py index 4f22020971..5e15d796d6 100644 --- a/examples/python/RGBD/rgbd.py +++ b/examples/python/RGBD/rgbd.py @@ -21,7 +21,7 @@ def run(self): rr.init("", spawn=True) rr.log("world", rr.ViewCoordinates.RDF) rr.log("world/ground", rr.Boxes3D(half_sizes=[3.0, 3.0, 0.00001])) - while self.isRunning(): + while self.mainLoop(): try: inPointCloud = self.inputPCL.get() except dai.MessageQueue.QueueException: diff --git a/examples/python/RGBD/rgbd_o3d.py b/examples/python/RGBD/rgbd_o3d.py index 0f7d2787f7..5ba5140155 100644 --- a/examples/python/RGBD/rgbd_o3d.py +++ b/examples/python/RGBD/rgbd_o3d.py @@ -33,7 +33,7 @@ def key_callback(vis, action, mods): ) vis.add_geometry(coordinateFrame) first = True - while self.isRunning(): + while self.mainLoop(): try: inPointCloud = self.inputPCL.tryGet() except dai.MessageQueue.QueueException: diff --git a/examples/python/RGBD/rgbd_pcl_processing.py b/examples/python/RGBD/rgbd_pcl_processing.py index 804d83e77c..0b1930bace 100644 --- a/examples/python/RGBD/rgbd_pcl_processing.py +++ b/examples/python/RGBD/rgbd_pcl_processing.py @@ -11,7 +11,7 @@ def __init__(self): self.thresholdDistance = 3000.0 def run(self): - while self.isRunning(): + while self.mainLoop(): try: inPointCloud = self.inputPCL.get() except dai.MessageQueue.QueueException: diff --git a/examples/python/RVC2/VSLAM/rerun_node.py b/examples/python/RVC2/VSLAM/rerun_node.py index 4211b7ef1e..caefda601a 100644 --- a/examples/python/RVC2/VSLAM/rerun_node.py +++ b/examples/python/RVC2/VSLAM/rerun_node.py @@ -36,7 +36,7 @@ def run(self): rr.init("", spawn=True) rr.log("world", rr.ViewCoordinates.FLU) rr.log("world/ground", rr.Boxes3D(half_sizes=[3.0, 3.0, 0.00001])) - while self.isRunning(): + while self.mainLoop(): transData = 
self.inputTrans.get() imgFrame = self.inputImg.get() if not self.intrinsicsSet: @@ -63,4 +63,4 @@ def run(self): points, colors = pclGrndData.getPointsRGB() rr.log("world/ground_pcl", rr.Points3D(points, colors=colors, radii=[0.01])) if mapData is not None: - rr.log("map", rr.Image(mapData.getCvFrame())) \ No newline at end of file + rr.log("map", rr.Image(mapData.getCvFrame())) diff --git a/examples/python/Visualizer/visualizer_encoded.py b/examples/python/Visualizer/visualizer_encoded.py index 9c6ab195af..78d0eca6fe 100644 --- a/examples/python/Visualizer/visualizer_encoded.py +++ b/examples/python/Visualizer/visualizer_encoded.py @@ -20,7 +20,7 @@ def __init__(self): def setLabelMap(self, labelMap): self.labelMap = labelMap def run(self): - while self.isRunning(): + while self.mainLoop(): nnData = self.inputDet.get() detections = nnData.detections imgAnnt = dai.ImgAnnotations() diff --git a/include/depthai/pipeline/MessageQueue.hpp b/include/depthai/pipeline/MessageQueue.hpp index e7667e6238..43e1d31e6e 100644 --- a/include/depthai/pipeline/MessageQueue.hpp +++ b/include/depthai/pipeline/MessageQueue.hpp @@ -7,6 +7,7 @@ // project #include "depthai/pipeline/datatype/ADatatype.hpp" #include "depthai/utility/LockingQueue.hpp" +#include "depthai/utility/PipelineEventDispatcherInterface.hpp" // shared namespace dai { @@ -37,11 +38,15 @@ class MessageQueue : public std::enable_shared_from_this { private: void callCallbacks(std::shared_ptr msg); + std::shared_ptr pipelineEventDispatcher; public: // DataOutputQueue constructor explicit MessageQueue(unsigned int maxSize = 16, bool blocking = true); - explicit MessageQueue(std::string name, unsigned int maxSize = 16, bool blocking = true); + explicit MessageQueue(std::string name, + unsigned int maxSize = 16, + bool blocking = true, + std::shared_ptr pipelineEventDispatcher = nullptr); MessageQueue(const MessageQueue& c) : enable_shared_from_this(c), queue(c.queue), name(c.name), callbacks(c.callbacks), 
uniqueCallbackId(c.uniqueCallbackId){}; @@ -196,11 +201,15 @@ class MessageQueue : public std::enable_shared_from_this { */ template std::shared_ptr tryGet() { + if(pipelineEventDispatcher) pipelineEventDispatcher->startInputEvent(name, getSize()); if(queue.isDestroyed()) { throw QueueException(CLOSED_QUEUE_MESSAGE); } std::shared_ptr val = nullptr; - if(!queue.tryPop(val)) return nullptr; + if(!queue.tryPop(val)) { + return nullptr; + } + if(pipelineEventDispatcher) pipelineEventDispatcher->endInputEvent(name, getSize()); return std::dynamic_pointer_cast(val); } @@ -220,10 +229,12 @@ class MessageQueue : public std::enable_shared_from_this { */ template std::shared_ptr get() { + if(pipelineEventDispatcher) pipelineEventDispatcher->startInputEvent(name, getSize()); std::shared_ptr val = nullptr; if(!queue.waitAndPop(val)) { throw QueueException(CLOSED_QUEUE_MESSAGE); } + if(pipelineEventDispatcher) pipelineEventDispatcher->endInputEvent(name, getSize()); return std::dynamic_pointer_cast(val); } diff --git a/include/depthai/pipeline/Node.hpp b/include/depthai/pipeline/Node.hpp index cd6ef073ef..85d826a3fa 100644 --- a/include/depthai/pipeline/Node.hpp +++ b/include/depthai/pipeline/Node.hpp @@ -20,6 +20,7 @@ #include "depthai/capabilities/Capability.hpp" #include "depthai/pipeline/datatype/DatatypeEnum.hpp" #include "depthai/properties/Properties.hpp" +#include "depthai/utility/PipelineEventDispatcherInterface.hpp" // libraries #include @@ -83,6 +84,8 @@ class Node : public std::enable_shared_from_this { std::vector inputMapRefs; std::vector*> nodeRefs; + std::shared_ptr pipelineEventDispatcher; + // helpers for setting refs void setOutputRefs(std::initializer_list l); void setOutputRefs(Output* outRef); @@ -129,11 +132,12 @@ class Node : public std::enable_shared_from_this { std::vector queueConnections; Type type = Type::MSender; // Slave sender not supported yet OutputDescription desc; + std::shared_ptr pipelineEventDispatcher; public: // std::vector 
possibleCapabilities; - Output(Node& par, OutputDescription desc, bool ref = true) : parent(par), desc(std::move(desc)) { + Output(Node& par, OutputDescription desc, bool ref = true) : parent(par), desc(std::move(desc)), pipelineEventDispatcher(par.pipelineEventDispatcher) { // Place oneself to the parents references if(ref) { par.setOutputRefs(this); @@ -346,16 +350,14 @@ class Node : public std::enable_shared_from_this { public: std::vector possibleDatatypes; explicit Input(Node& par, InputDescription desc, bool ref = true) - : MessageQueue(std::move(desc.name), desc.queueSize, desc.blocking), + : MessageQueue(desc.name.empty() ? par.createUniqueInputName() : desc.name, desc.queueSize, desc.blocking, par.pipelineEventDispatcher), parent(par), waitForMessage(desc.waitForMessage), + group(desc.group), possibleDatatypes(std::move(desc.types)) { if(ref) { par.setInputRefs(this); } - if(getName().empty()) { - setName(par.createUniqueInputName()); - } } /** diff --git a/include/depthai/pipeline/NodeObjInfo.hpp b/include/depthai/pipeline/NodeObjInfo.hpp index da1c87a0d1..c786065dd3 100644 --- a/include/depthai/pipeline/NodeObjInfo.hpp +++ b/include/depthai/pipeline/NodeObjInfo.hpp @@ -15,6 +15,7 @@ struct NodeObjInfo { std::string name; std::string alias; + std::string device; std::vector properties; @@ -27,6 +28,6 @@ struct NodeObjInfo { std::unordered_map, NodeIoInfo, IoInfoKey> ioInfo; }; -DEPTHAI_SERIALIZE_EXT(NodeObjInfo, id, parentId, name, alias, properties, logLevel, ioInfo); +DEPTHAI_SERIALIZE_EXT(NodeObjInfo, id, parentId, name, alias, device, properties, logLevel, ioInfo); } // namespace dai diff --git a/include/depthai/pipeline/Pipeline.hpp b/include/depthai/pipeline/Pipeline.hpp index 83457a05f9..4991b770a0 100644 --- a/include/depthai/pipeline/Pipeline.hpp +++ b/include/depthai/pipeline/Pipeline.hpp @@ -12,14 +12,18 @@ #include "AssetManager.hpp" #include "DeviceNode.hpp" #include "Node.hpp" +#include "PipelineStateApi.hpp" #include 
"depthai/device/CalibrationHandler.hpp" #include "depthai/device/Device.hpp" #include "depthai/openvino/OpenVINO.hpp" +#include "depthai/pipeline/datatype/PipelineEventAggregationConfig.hpp" #include "depthai/utility/AtomicBool.hpp" // shared #include "depthai/device/BoardConfig.hpp" +#include "depthai/pipeline/InputQueue.hpp" #include "depthai/pipeline/PipelineSchema.hpp" +#include "depthai/pipeline/datatype/PipelineState.hpp" #include "depthai/properties/GlobalProperties.hpp" #include "depthai/utility/RecordReplay.hpp" @@ -53,6 +57,7 @@ class PipelineImpl : public std::enable_shared_from_this { // Functions Node::Id getNextUniqueId(); PipelineSchema getPipelineSchema(SerializationType type = DEFAULT_SERIALIZATION_TYPE) const; + PipelineSchema getDevicePipelineSchema(SerializationType type = DEFAULT_SERIALIZATION_TYPE) const; Device::Config getDeviceConfig() const; void setCameraTuningBlobPath(const fs::path& path); void setXLinkChunkSize(int sizeBytes); @@ -85,6 +90,9 @@ class PipelineImpl : public std::enable_shared_from_this { bool isHostOnly() const; bool isDeviceOnly() const; + // Pipeline state getters + PipelineStateApi getPipelineState(); + // Must be incremented and unique for each node Node::Id latestId = 0; // Pipeline asset manager @@ -97,6 +105,7 @@ class PipelineImpl : public std::enable_shared_from_this { // using NodeMap = std::unordered_map>; // NodeMap nodeMap; std::vector> nodes; + std::vector> xlinkBridges; // TODO(themarpe) - refactor, connections are now carried by nodes instead using NodeConnectionMap = std::unordered_map>; @@ -114,6 +123,11 @@ class PipelineImpl : public std::enable_shared_from_this { std::unordered_map recordReplayFilenames; bool removeRecordReplayFiles = true; std::string defaultDeviceId; + bool pipelineOnHost = true; + + // Pipeline events + std::shared_ptr pipelineStateOut; + std::shared_ptr pipelineStateRequest; // Output queues std::vector> outputQueues; @@ -290,6 +304,11 @@ class Pipeline { */ PipelineSchema 
getPipelineSchema(SerializationType type = DEFAULT_SERIALIZATION_TYPE) const; + /** + * @returns Device pipeline schema (without host only nodes and connections) + */ + PipelineSchema getDevicePipelineSchema(SerializationType type = DEFAULT_SERIALIZATION_TYPE) const; + // void loadAssets(AssetManager& assetManager); void serialize(PipelineSchema& schema, Assets& assets, std::vector& assetStorage) const { impl()->serialize(schema, assets, assetStorage); @@ -477,6 +496,10 @@ class Pipeline { void build() { impl()->build(); } + void buildDevice() { + impl()->pipelineOnHost = false; + impl()->build(); + } void start() { impl()->start(); } @@ -506,6 +529,9 @@ class Pipeline { /// Record and Replay void enableHolisticRecord(const RecordConfig& config); void enableHolisticReplay(const std::string& pathToRecording); + + // Pipeline state getters + PipelineStateApi getPipelineState(); }; } // namespace dai diff --git a/include/depthai/pipeline/PipelineSchema.hpp b/include/depthai/pipeline/PipelineSchema.hpp index b8cd497d0d..a9fee1adaf 100644 --- a/include/depthai/pipeline/PipelineSchema.hpp +++ b/include/depthai/pipeline/PipelineSchema.hpp @@ -14,8 +14,9 @@ struct PipelineSchema { std::vector connections; GlobalProperties globalProperties; std::unordered_map nodes; + std::vector> bridges; }; -DEPTHAI_SERIALIZE_EXT(PipelineSchema, connections, globalProperties, nodes); +DEPTHAI_SERIALIZE_EXT(PipelineSchema, connections, globalProperties, nodes, bridges); } // namespace dai diff --git a/include/depthai/pipeline/PipelineStateApi.hpp b/include/depthai/pipeline/PipelineStateApi.hpp new file mode 100644 index 0000000000..90f0bd98ea --- /dev/null +++ b/include/depthai/pipeline/PipelineStateApi.hpp @@ -0,0 +1,96 @@ +#pragma once + +#include "Node.hpp" +#include "depthai/pipeline/datatype/PipelineEventAggregationConfig.hpp" +#include "depthai/pipeline/datatype/PipelineState.hpp" + +namespace dai { + +/** + * pipeline.getState().nodes({nodeId1}).summary() -> std::unordered_map; + * 
pipeline.getState().nodes({nodeId1}).detailed() -> std::unordered_map; + * pipeline.getState().nodes(nodeId1).detailed() -> NodeState; + * pipeline.getState().nodes({nodeId1}).outputs() -> std::unordered_map; + * pipeline.getState().nodes({nodeId1}).outputs({outputName1}) -> std::unordered_map; + * pipeline.getState().nodes({nodeId1}).outputs(outputName) -> TimingStats; + * pipeline.getState().nodes({nodeId1}).events(); + * pipeline.getState().nodes({nodeId1}).inputs() -> std::unordered_map; + * pipeline.getState().nodes({nodeId1}).inputs({inputName1}) -> std::unordered_map; + * pipeline.getState().nodes({nodeId1}).inputs(inputName) -> QueueState; + * pipeline.getState().nodes({nodeId1}).otherStats() -> std::unordered_map; + * pipeline.getState().nodes({nodeId1}).otherStats({statName1}) -> std::unordered_map; + * pipeline.getState().nodes({nodeId1}).outputs(statName) -> TimingStats; + */ +class NodesStateApi { + std::vector nodeIds; + + std::shared_ptr pipelineStateOut; + std::shared_ptr pipelineStateRequest; + + public: + explicit NodesStateApi(std::vector nodeIds, std::shared_ptr pipelineStateOut, std::shared_ptr pipelineStateRequest) + : nodeIds(std::move(nodeIds)), pipelineStateOut(pipelineStateOut), pipelineStateRequest(pipelineStateRequest) {} + PipelineState summary(); + PipelineState detailed(); + std::unordered_map> outputs(); + std::unordered_map> inputs(); + std::unordered_map> otherTimings(); +}; +class NodeStateApi { + Node::Id nodeId; + + std::shared_ptr pipelineStateOut; + std::shared_ptr pipelineStateRequest; + + public: + explicit NodeStateApi(Node::Id nodeId, std::shared_ptr pipelineStateOut, std::shared_ptr pipelineStateRequest) + : nodeId(nodeId), pipelineStateOut(pipelineStateOut), pipelineStateRequest(pipelineStateRequest) {} + NodeState summary() { + return NodesStateApi({nodeId}, pipelineStateOut, pipelineStateRequest).summary().nodeStates[nodeId]; + } + NodeState detailed() { + return NodesStateApi({nodeId}, pipelineStateOut, 
pipelineStateRequest).detailed().nodeStates[nodeId]; + } + std::unordered_map outputs() { + return NodesStateApi({nodeId}, pipelineStateOut, pipelineStateRequest).outputs()[nodeId]; + } + std::unordered_map inputs() { + return NodesStateApi({nodeId}, pipelineStateOut, pipelineStateRequest).inputs()[nodeId]; + } + std::unordered_map otherTimings() { + return NodesStateApi({nodeId}, pipelineStateOut, pipelineStateRequest).otherTimings()[nodeId]; + } + std::unordered_map outputs(const std::vector& outputNames); + NodeState::OutputQueueState outputs(const std::string& outputName); + std::vector events(); + std::unordered_map inputs(const std::vector& inputNames); + NodeState::InputQueueState inputs(const std::string& inputName); + std::unordered_map otherTimings(const std::vector& statNames); + NodeState::Timing otherStats(const std::string& statName); +}; +class PipelineStateApi { + std::shared_ptr pipelineStateOut; + std::shared_ptr pipelineStateRequest; + std::vector nodeIds; // empty means all nodes + + public: + PipelineStateApi(std::shared_ptr pipelineStateOut, + std::shared_ptr pipelineStateRequest, + const std::vector>& allNodes) + : pipelineStateOut(std::move(pipelineStateOut)), pipelineStateRequest(std::move(pipelineStateRequest)) { + for(const auto& n : allNodes) { + nodeIds.push_back(n->id); + } + } + NodesStateApi nodes() { + return NodesStateApi(nodeIds, pipelineStateOut, pipelineStateRequest); + } + NodesStateApi nodes(const std::vector& nodeIds) { + return NodesStateApi(nodeIds, pipelineStateOut, pipelineStateRequest); + } + NodeStateApi nodes(Node::Id nodeId) { + return NodeStateApi(nodeId, pipelineStateOut, pipelineStateRequest); + } +}; + +} // namespace dai diff --git a/include/depthai/pipeline/ThreadedNode.hpp b/include/depthai/pipeline/ThreadedNode.hpp index d9657f6a5f..d5a2147b01 100644 --- a/include/depthai/pipeline/ThreadedNode.hpp +++ b/include/depthai/pipeline/ThreadedNode.hpp @@ -9,10 +9,17 @@ namespace dai { class ThreadedNode : public Node 
{ + friend class PipelineImpl; + private: JoiningThread thread; AtomicBool running{false}; + protected: + Output pipelineEventOutput{*this, {"pipelineEventOutput", DEFAULT_GROUP, {{{DatatypeEnum::PipelineEvent, false}}}}}; + + void initPipelineEventDispatcher(int64_t nodeId); + public: using Node::Node; ThreadedNode(); @@ -45,6 +52,8 @@ class ThreadedNode : public Node { // check if still running bool isRunning() const; + bool mainLoop(); + /** * @brief Sets the logging severity level for this node. * @@ -59,6 +68,10 @@ class ThreadedNode : public Node { */ virtual dai::LogLevel getLogLevel() const; + utility::PipelineEventDispatcherInterface::BlockPipelineEvent inputBlockEvent(const std::string& source = "defaultInputGroup"); + utility::PipelineEventDispatcherInterface::BlockPipelineEvent outputBlockEvent(const std::string& source = "defaultOutputGroup"); + utility::PipelineEventDispatcherInterface::BlockPipelineEvent blockEvent(PipelineEvent::Type type, const std::string& source); + class Impl; spimpl::impl_ptr pimpl; }; diff --git a/include/depthai/pipeline/datatype/DatatypeEnum.hpp b/include/depthai/pipeline/datatype/DatatypeEnum.hpp index d8d594263d..7be2fa4f77 100644 --- a/include/depthai/pipeline/datatype/DatatypeEnum.hpp +++ b/include/depthai/pipeline/datatype/DatatypeEnum.hpp @@ -43,6 +43,9 @@ enum class DatatypeEnum : std::int32_t { DynamicCalibrationResult, CalibrationQuality, CoverageData, + PipelineEvent, + PipelineState, + PipelineEventAggregationConfig, }; bool isDatatypeSubclassOf(DatatypeEnum parent, DatatypeEnum children); diff --git a/include/depthai/pipeline/datatype/PipelineEvent.hpp b/include/depthai/pipeline/datatype/PipelineEvent.hpp new file mode 100644 index 0000000000..03293fa979 --- /dev/null +++ b/include/depthai/pipeline/datatype/PipelineEvent.hpp @@ -0,0 +1,47 @@ +#pragma once + +#include + +#include "depthai/common/optional.hpp" +#include "depthai/pipeline/datatype/Buffer.hpp" + +namespace dai { + +/** + * Pipeline event message. 
+ */ +class PipelineEvent : public Buffer { + public: + enum class Type : std::int32_t { + CUSTOM = 0, + LOOP = 1, + INPUT = 2, + OUTPUT = 3, + INPUT_BLOCK = 4, + OUTPUT_BLOCK = 5, + }; + enum class Interval : std::int32_t { + NONE = 0, + START = 1, + END = 2 + }; + + PipelineEvent() = default; + virtual ~PipelineEvent() = default; + + int64_t nodeId = -1; + int32_t status = 0; + std::optional queueSize; + Interval interval = Interval::NONE; + Type type = Type::CUSTOM; + std::string source; + + void serialize(std::vector& metadata, DatatypeEnum& datatype) const override { + metadata = utility::serialize(*this); + datatype = DatatypeEnum::PipelineEvent; + }; + + DEPTHAI_SERIALIZE(PipelineEvent, Buffer::ts, Buffer::tsDevice, Buffer::sequenceNum, nodeId, status, queueSize, interval, type, source); +}; + +} // namespace dai diff --git a/include/depthai/pipeline/datatype/PipelineEventAggregationConfig.hpp b/include/depthai/pipeline/datatype/PipelineEventAggregationConfig.hpp new file mode 100644 index 0000000000..e321bccea0 --- /dev/null +++ b/include/depthai/pipeline/datatype/PipelineEventAggregationConfig.hpp @@ -0,0 +1,36 @@ +#pragma once +#include +#include + +#include "depthai/common/optional.hpp" +#include "depthai/pipeline/datatype/Buffer.hpp" +#include "depthai/pipeline/datatype/DatatypeEnum.hpp" + +namespace dai { + +class NodeEventAggregationConfig { + public: + int64_t nodeId = -1; + std::optional> inputs; + std::optional> outputs; + std::optional> others; + bool events = false; + + DEPTHAI_SERIALIZE(NodeEventAggregationConfig, nodeId, inputs, outputs, others, events); +}; + +/// PipelineEventAggregationConfig configuration structure +class PipelineEventAggregationConfig : public Buffer { + public: + std::vector nodes; + bool repeat = false; // Keep sending the aggregated state without waiting for new config + + PipelineEventAggregationConfig() = default; + virtual ~PipelineEventAggregationConfig(); + + void serialize(std::vector& metadata, DatatypeEnum& 
datatype) const override; + + DEPTHAI_SERIALIZE(PipelineEventAggregationConfig, Buffer::ts, Buffer::tsDevice, Buffer::sequenceNum, nodes, repeat); +}; + +} // namespace dai diff --git a/include/depthai/pipeline/datatype/PipelineState.hpp b/include/depthai/pipeline/datatype/PipelineState.hpp new file mode 100644 index 0000000000..ea73c38a51 --- /dev/null +++ b/include/depthai/pipeline/datatype/PipelineState.hpp @@ -0,0 +1,111 @@ +#pragma once + +#include + +#include "depthai/pipeline/datatype/Buffer.hpp" +#include "depthai/pipeline/datatype/PipelineEvent.hpp" + +namespace dai { + +class NodeState { + public: + struct DurationEvent { + PipelineEvent startEvent; + uint64_t durationUs; + DEPTHAI_SERIALIZE(DurationEvent, startEvent, durationUs); + }; + struct TimingStats { + uint64_t minMicros = -1; + uint64_t maxMicros; + uint64_t averageMicrosRecent; + uint64_t stdDevMicrosRecent; + uint64_t minMicrosRecent = -1; + uint64_t maxMicrosRecent; + uint64_t medianMicrosRecent; + DEPTHAI_SERIALIZE(TimingStats, minMicros, maxMicros, averageMicrosRecent, stdDevMicrosRecent, minMicrosRecent, maxMicrosRecent, medianMicrosRecent); + }; + struct Timing { + float fps; + TimingStats durationStats; + DEPTHAI_SERIALIZE(Timing, fps, durationStats); + }; + struct QueueStats { + uint32_t maxQueued; + uint32_t minQueuedRecent; + uint32_t maxQueuedRecent; + uint32_t medianQueuedRecent; + DEPTHAI_SERIALIZE(QueueStats, maxQueued, minQueuedRecent, maxQueuedRecent, medianQueuedRecent); + }; + struct InputQueueState { + // Current state of the input queue. 
+ enum class State : std::int32_t { + IDLE = 0, + WAITING = 1, // Waiting to receive a message + BLOCKED = 2 // An output attempted to send to this input, but the input queue was full + } state = State::IDLE; + // Number of messages currently queued in the input queue + uint32_t numQueued; + // Timing info about this input + Timing timing; + // Queue usage stats + QueueStats queueStats; + DEPTHAI_SERIALIZE(InputQueueState, state, numQueued, timing); + }; + struct OutputQueueState { + // Current state of the output queue. Send should ideally be instant. This is not the case when the input queue is full. + // In that case, the state will be SENDING until there is space in the input queue (unless trySend is used). + enum class State : std::int32_t { + IDLE = 0, + SENDING = 1 + } state = State::IDLE; + // Timing info about this output + Timing timing; + DEPTHAI_SERIALIZE(OutputQueueState, state, timing); + }; + enum class State : std::int32_t { + IDLE = 0, + GETTING_INPUTS = 1, + PROCESSING = 2, + SENDING_OUTPUTS = 3 + }; + + // Current state of the node - idle only when not running + State state = State::IDLE; + // Optional list of recent events + std::vector events; + // Info about each output + std::unordered_map outputStates; + // Info about each input + std::unordered_map inputStates; + // Time spent getting inputs in a loop + Timing inputsGetTiming; + // Time spent sending outputs in a loop + Timing outputsSendTiming; + // Main node loop timing (processing time + inputs get + outputs send) + Timing mainLoopTiming; + // Other timings that the developer of the node decided to add + std::unordered_map otherTimings; + + DEPTHAI_SERIALIZE(NodeState, state, events, outputStates, inputStates, inputsGetTiming, outputsSendTiming, mainLoopTiming, otherTimings); +}; + +/** + * Pipeline event message. 
+ */ +class PipelineState : public Buffer { + public: + PipelineState() = default; + virtual ~PipelineState() = default; + + std::unordered_map nodeStates; + uint32_t configSequenceNum = 0; + + void serialize(std::vector& metadata, DatatypeEnum& datatype) const override { + metadata = utility::serialize(*this); + datatype = DatatypeEnum::PipelineState; + }; + + DEPTHAI_SERIALIZE(PipelineState, Buffer::ts, Buffer::tsDevice, Buffer::sequenceNum, nodeStates, configSequenceNum); +}; + +} // namespace dai diff --git a/include/depthai/pipeline/node/internal/PipelineEventAggregation.hpp b/include/depthai/pipeline/node/internal/PipelineEventAggregation.hpp new file mode 100644 index 0000000000..2ca5568412 --- /dev/null +++ b/include/depthai/pipeline/node/internal/PipelineEventAggregation.hpp @@ -0,0 +1,54 @@ +#pragma once + +#include + +// shared +#include + +namespace dai { +namespace node { +namespace internal { + +/** + * @brief Sync node. Performs syncing between image frames + */ +class PipelineEventAggregation : public DeviceNodeCRTP, public HostRunnable { + private: + bool runOnHostVar = false; + + public: + constexpr static const char* NAME = "PipelineEventAggregation"; + using DeviceNodeCRTP::DeviceNodeCRTP; + + /** + * A map of inputs + */ + InputMap inputs{*this, "inputs", {DEFAULT_NAME, DEFAULT_GROUP, false, 16, {{{DatatypeEnum::PipelineEvent, false}}}, DEFAULT_WAIT_FOR_MESSAGE}}; + + /** + * Input PipelineEventAggregationConfig message with state request parameters + */ + Input request{*this, {"request", DEFAULT_GROUP, DEFAULT_BLOCKING, DEFAULT_QUEUE_SIZE, {{{DatatypeEnum::PipelineEventAggregationConfig, false}}}, false}}; + + /** + * Output message of type + */ + Output out{*this, {"out", DEFAULT_GROUP, {{{DatatypeEnum::PipelineState, false}}}}}; + + /** + * Specify whether to run on host or device + * By default, the node will run on device. 
+ */ + void setRunOnHost(bool runOnHost); + + /** + * Check if the node is set to run on host + */ + bool runOnHost() const override; + + void run() override; +}; + +} // namespace internal +} // namespace node +} // namespace dai diff --git a/include/depthai/pipeline/node/internal/PipelineStateMerge.hpp b/include/depthai/pipeline/node/internal/PipelineStateMerge.hpp new file mode 100644 index 0000000000..e391dfd08d --- /dev/null +++ b/include/depthai/pipeline/node/internal/PipelineStateMerge.hpp @@ -0,0 +1,42 @@ +#pragma once + +#include "depthai/pipeline/ThreadedHostNode.hpp" + +namespace dai { +namespace node { + +/** + * @brief PipelineStateMerge node. Merges PipelineState messages from device and host into a single output. + */ +class PipelineStateMerge : public CustomThreadedNode { + bool hasDeviceNodes = false; + bool hasHostNodes = false; + + public: + constexpr static const char* NAME = "PipelineStateMerge"; + + Input inputDevice{*this, {"inputDevice", DEFAULT_GROUP, false, 4, {{DatatypeEnum::PipelineState, false}}}}; + Input inputHost{*this, {"inputHost", DEFAULT_GROUP, false, 4, {{DatatypeEnum::PipelineState, false}}}}; + + /** + * Input PipelineEventAggregationConfig message with state request parameters + */ + Input request{*this, {"request", DEFAULT_GROUP, DEFAULT_BLOCKING, DEFAULT_QUEUE_SIZE, {{{DatatypeEnum::PipelineEventAggregationConfig, false}}}, false}}; + + /** + * Output PipelineEventAggregationConfig message with state request parameters + */ + Output outRequest{*this, {"outRequest", DEFAULT_GROUP, {{{DatatypeEnum::PipelineEventAggregationConfig, false}}}}}; + + /** + * Output message of type + */ + Output out{*this, {"out", DEFAULT_GROUP, {{{DatatypeEnum::PipelineState, false}}}}}; + + std::shared_ptr build(bool hasDeviceNodes, bool hasHostNodes); + + void run() override; +}; + +} // namespace node +} // namespace dai diff --git a/include/depthai/properties/internal/PipelineEventAggregationProperties.hpp 
b/include/depthai/properties/internal/PipelineEventAggregationProperties.hpp new file mode 100644 index 0000000000..013dd29bf1 --- /dev/null +++ b/include/depthai/properties/internal/PipelineEventAggregationProperties.hpp @@ -0,0 +1,18 @@ +#pragma once + +#include "depthai/properties/Properties.hpp" + +namespace dai { + +/** + * Specify properties for Sync. + */ +struct PipelineEventAggregationProperties : PropertiesSerializable { + uint32_t aggregationWindowSize = 100; + uint32_t eventBatchSize = 50; + uint32_t eventWaitWindow = 16; +}; + +DEPTHAI_SERIALIZE_EXT(PipelineEventAggregationProperties, aggregationWindowSize, eventBatchSize, eventWaitWindow); + +} // namespace dai diff --git a/include/depthai/utility/CircularBuffer.hpp b/include/depthai/utility/CircularBuffer.hpp new file mode 100644 index 0000000000..c0428924e9 --- /dev/null +++ b/include/depthai/utility/CircularBuffer.hpp @@ -0,0 +1,169 @@ +#pragma once +#include +#include + +namespace dai { +namespace utility { + +template +class CircularBuffer { + public: + CircularBuffer(size_t size) : maxSize(size) { + buffer.reserve(size); + } + T& add(T value) { + if(buffer.size() < maxSize) { + buffer.push_back(value); + return buffer.back(); + } else { + buffer[index] = value; + index = (index + 1) % maxSize; + return buffer[(index + maxSize - 1) % maxSize]; + } + } + std::vector getBuffer() const { + std::vector result; + if(buffer.size() < maxSize) { + result = buffer; + } else { + result.insert(result.end(), buffer.begin() + index, buffer.end()); + result.insert(result.end(), buffer.begin(), buffer.begin() + index); + } + return result; + } + T first() const { + if(buffer.empty()) { + throw std::runtime_error("CircularBuffer is empty"); + } + if(buffer.size() < maxSize) { + return buffer.front(); + } else { + return buffer[index]; + } + } + T last() const { + if(buffer.empty()) { + throw std::runtime_error("CircularBuffer is empty"); + } + if(buffer.size() < maxSize) { + return buffer.back(); + } else { + 
return buffer[(index + maxSize - 1) % maxSize]; + } + } + size_t size() const { + return buffer.size(); + } + + // =========================================================== + // Forward iterator + // =========================================================== + class iterator { + public: + using iterator_category = std::forward_iterator_tag; + using value_type = T; + using difference_type = std::ptrdiff_t; + using pointer = T*; + using reference = T&; + + iterator(CircularBuffer* parent, size_t pos) : parent(parent), pos(pos) {} + + reference operator*() { + return parent->buffer[(parent->start() + pos) % parent->buffer.size()]; + } + pointer operator->() { + return &(**this); + } + + iterator& operator++() { + ++pos; + return *this; + } + iterator operator++(int) { + iterator tmp = *this; + ++(*this); + return tmp; + } + + bool operator==(const iterator& other) const { + return parent == other.parent && pos == other.pos; + } + bool operator!=(const iterator& other) const { + return !(*this == other); + } + + private: + CircularBuffer* parent; + size_t pos; + }; + + iterator begin() { + return iterator(this, 0); + } + iterator end() { + return iterator(this, buffer.size()); + } + + // =========================================================== + // Reverse iterator + // =========================================================== + class reverse_iterator { + public: + using iterator_category = std::bidirectional_iterator_tag; + using value_type = T; + using difference_type = std::ptrdiff_t; + using pointer = T*; + using reference = T&; + + reverse_iterator(CircularBuffer* parent, size_t pos) : parent(parent), pos(pos) {} + + reference operator*() { + // Map reverse position to logical order + size_t logicalIndex = (parent->start() + parent->buffer.size() - 1 - pos) % parent->buffer.size(); + return parent->buffer[logicalIndex]; + } + pointer operator->() { + return &(**this); + } + + reverse_iterator& operator++() { + ++pos; + return *this; + } + reverse_iterator 
operator++(int) { + reverse_iterator tmp = *this; + ++(*this); + return tmp; + } + + bool operator==(const reverse_iterator& other) const { + return parent == other.parent && pos == other.pos; + } + bool operator!=(const reverse_iterator& other) const { + return !(*this == other); + } + + private: + CircularBuffer* parent; + size_t pos; + }; + + reverse_iterator rbegin() { + return reverse_iterator(this, 0); + } + reverse_iterator rend() { + return reverse_iterator(this, buffer.size()); + } + + private: + std::vector buffer; + size_t maxSize; + size_t index = 0; + + size_t start() const { + return (buffer.size() < maxSize) ? 0 : index; + } +}; + +} // namespace utility +} // namespace dai diff --git a/include/depthai/utility/ImageManipImpl.hpp b/include/depthai/utility/ImageManipImpl.hpp index 505e0e09c1..5472ffd4d0 100644 --- a/include/depthai/utility/ImageManipImpl.hpp +++ b/include/depthai/utility/ImageManipImpl.hpp @@ -59,49 +59,53 @@ void loop(N& node, std::shared_ptr inImage; - while(node.isRunning()) { + while(node.mainLoop()) { std::shared_ptr pConfig; bool hasConfig = false; bool needsImage = true; bool skipImage = false; - if(node.inputConfig.getWaitForMessage()) { - pConfig = node.inputConfig.template get(); - hasConfig = true; - if(inImage != nullptr && hasConfig && pConfig->getReusePreviousImage()) { - needsImage = false; - } - skipImage = pConfig->getSkipCurrentImage(); - } else { - pConfig = node.inputConfig.template tryGet(); - if(pConfig != nullptr) { - hasConfig = true; - } - } + { + auto blockEvent = node.inputBlockEvent(); - if(needsImage) { - inImage = node.inputImage.template get(); - if(inImage == nullptr) { - logger->warn("No input image, skipping frame"); - continue; - } - if(!hasConfig) { - auto _pConfig = node.inputConfig.template tryGet(); - if(_pConfig != nullptr) { - pConfig = _pConfig; + if(node.inputConfig.getWaitForMessage()) { + pConfig = node.inputConfig.template get(); + hasConfig = true; + if(inImage != nullptr && hasConfig && 
pConfig->getReusePreviousImage()) { + needsImage = false; + } + skipImage = pConfig->getSkipCurrentImage(); + } else { + pConfig = node.inputConfig.template tryGet(); + if(pConfig != nullptr) { hasConfig = true; } } - if(skipImage) { - continue; + + if(needsImage) { + inImage = node.inputImage.template get(); + if(inImage == nullptr) { + logger->warn("No input image, skipping frame"); + continue; + } + if(!hasConfig) { + auto _pConfig = node.inputConfig.template tryGet(); + if(_pConfig != nullptr) { + pConfig = _pConfig; + hasConfig = true; + } + } + if(skipImage) { + continue; + } } - } - // if has new config, parse and check if any changes - if(hasConfig) { - config = *pConfig; - } - if(!node.inputConfig.getWaitForMessage() && config.getReusePreviousImage()) { - logger->warn("reusePreviousImage is only taken into account when inputConfig is synchronous"); + // if has new config, parse and check if any changes + if(hasConfig) { + config = *pConfig; + } + if(!node.inputConfig.getWaitForMessage() && config.getReusePreviousImage()) { + logger->warn("reusePreviousImage is only taken into account when inputConfig is synchronous"); + } } auto startP = std::chrono::steady_clock::now(); @@ -135,7 +139,11 @@ void loop(N& node, if(!success) { logger->error("Processing failed, potentially unsupported config"); } - node.out.send(outImage); + { + auto blockEvent = node.outputBlockEvent(); + + node.out.send(outImage); + } } else { logger->error( "Output image is bigger ({}B) than maximum frame size specified in properties ({}B) - skipping frame.\nPlease use the setMaxOutputFrameSize " diff --git a/include/depthai/utility/LockingQueue.hpp b/include/depthai/utility/LockingQueue.hpp index f4a8593966..40db11abf1 100644 --- a/include/depthai/utility/LockingQueue.hpp +++ b/include/depthai/utility/LockingQueue.hpp @@ -18,6 +18,8 @@ namespace dai { // Mutex& operator=(Mutex&&) = delete; // }; +enum class LockingQueueState { OK, BLOCKED, CANCELLED }; + template class LockingQueue { 
public: @@ -149,7 +151,7 @@ class LockingQueue { return true; } - bool push(T const& data) { + bool push(T const& data, std::function<void(LockingQueueState, size_t)> callback = [](LockingQueueState, size_t) {}) { { std::unique_lock lock(guard); if(maxSize == 0) { @@ -166,6 +168,9 @@ class LockingQueue { queue.pop(); } } else { + if(queue.size() >= maxSize) { + callback(LockingQueueState::BLOCKED, queue.size()); + } signalPop.wait(lock, [this]() { return queue.size() < maxSize || destructed; }); if(destructed) return false; } @@ -176,7 +181,7 @@ class LockingQueue { return true; } - bool push(T&& data) { + bool push(T&& data, std::function<void(LockingQueueState, size_t)> callback = [](LockingQueueState, size_t) {}) { { std::unique_lock lock(guard); if(maxSize == 0) { @@ -193,6 +198,9 @@ class LockingQueue { queue.pop(); } } else { + if(queue.size() >= maxSize) { + callback(LockingQueueState::BLOCKED, queue.size()); + } signalPop.wait(lock, [this]() { return queue.size() < maxSize || destructed; }); if(destructed) return false; } @@ -204,7 +212,7 @@ class LockingQueue { } template <typename Rep, typename Period> - bool tryWaitAndPush(T const& data, std::chrono::duration<Rep, Period> timeout) { + bool tryWaitAndPush(T const& data, std::chrono::duration<Rep, Period> timeout, std::function<void(LockingQueueState, size_t)> callback = [](LockingQueueState, size_t) {}) { { std::unique_lock lock(guard); if(maxSize == 0) { @@ -221,8 +229,14 @@ class LockingQueue { queue.pop(); } } else { + if(queue.size() >= maxSize) { + callback(LockingQueueState::BLOCKED, queue.size()); + } // First checks predicate, then waits bool pred = signalPop.wait_for(lock, timeout, [this]() { return queue.size() < maxSize || destructed; }); + if(!pred) { + callback(LockingQueueState::CANCELLED, queue.size()); + } if(!pred) return false; if(destructed) return false; } @@ -234,7 +248,7 @@ class LockingQueue { } template <typename Rep, typename Period> - bool tryWaitAndPush(T&& data, std::chrono::duration<Rep, Period> timeout) { + bool tryWaitAndPush(T&& data, std::chrono::duration<Rep, Period> timeout, std::function<void(LockingQueueState, size_t)> callback = [](LockingQueueState, size_t) {}) { { std::unique_lock lock(guard); if(maxSize ==
0) { @@ -252,7 +266,13 @@ class LockingQueue { } } else { // First checks predicate, then waits + if(queue.size() >= maxSize) { + callback(LockingQueueState::BLOCKED, queue.size()); + } bool pred = signalPop.wait_for(lock, timeout, [this]() { return queue.size() < maxSize || destructed; }); + if(!pred) { + callback(LockingQueueState::CANCELLED, queue.size()); + } if(!pred) return false; if(destructed) return false; } diff --git a/include/depthai/utility/PipelineEventDispatcher.hpp b/include/depthai/utility/PipelineEventDispatcher.hpp new file mode 100644 index 0000000000..6a0595f6ad --- /dev/null +++ b/include/depthai/utility/PipelineEventDispatcher.hpp @@ -0,0 +1,59 @@ +#pragma once + +#include +#include +#include +#include + +#include "PipelineEventDispatcherInterface.hpp" +#include "depthai/pipeline/Node.hpp" +#include "depthai/pipeline/datatype/PipelineEvent.hpp" + +namespace dai { +namespace utility { + +class PipelineEventDispatcher : public PipelineEventDispatcherInterface { + struct EventStatus { + PipelineEvent event; + bool ongoing; + }; + + int64_t nodeId = -1; + std::unordered_map events; + Node::Output* out = nullptr; + + void checkNodeId(); + + uint32_t sequenceNum = 0; + + std::mutex mutex; + + public: + PipelineEventDispatcher() = delete; + PipelineEventDispatcher(Node::Output* output) : out(output) {} + + void setNodeId(int64_t id) override; + + void startEvent(PipelineEvent::Type type, const std::string& source, std::optional queueSize = std::nullopt) override; + void startInputEvent(const std::string& source, std::optional queueSize = std::nullopt) override; + void startOutputEvent(const std::string& source) override; + void startCustomEvent(const std::string& source) override; + void endEvent(PipelineEvent::Type type, + const std::string& source, + std::optional queueSize = std::nullopt) override; + void endInputEvent(const std::string& source, std::optional queueSize = std::nullopt) override; + void endOutputEvent(const std::string& source) 
override; + void endCustomEvent(const std::string& source) override; + void pingEvent(PipelineEvent::Type type, const std::string& source) override; + void pingTrackedEvent(PipelineEvent::Type type, const std::string& source, int64_t sequenceNum) override; + void pingMainLoopEvent() override; + void pingCustomEvent(const std::string& source) override; + void pingInputEvent(const std::string& source, int32_t status, std::optional queueSize = std::nullopt) override; + BlockPipelineEvent blockEvent(PipelineEvent::Type type, const std::string& source) override; + BlockPipelineEvent inputBlockEvent(const std::string& source = "defaultInputGroup") override; + BlockPipelineEvent outputBlockEvent(const std::string& source = "defaultOutputGroup") override; + BlockPipelineEvent customBlockEvent(const std::string& source) override; +}; + +} // namespace utility +} // namespace dai diff --git a/include/depthai/utility/PipelineEventDispatcherInterface.hpp b/include/depthai/utility/PipelineEventDispatcherInterface.hpp new file mode 100644 index 0000000000..e50b35537b --- /dev/null +++ b/include/depthai/utility/PipelineEventDispatcherInterface.hpp @@ -0,0 +1,52 @@ +#pragma once + +#include + +#include "depthai/pipeline/datatype/PipelineEvent.hpp" + +namespace dai { +namespace utility { + +class PipelineEventDispatcherInterface { + public: + class BlockPipelineEvent { + PipelineEventDispatcherInterface& dispatcher; + PipelineEvent::Type type; + std::string source; + + public: + BlockPipelineEvent(PipelineEventDispatcherInterface& dispatcher, PipelineEvent::Type type, const std::string& source) + : dispatcher(dispatcher), type(type), source(source) { + dispatcher.startEvent(type, source, std::nullopt); + } + ~BlockPipelineEvent() { + dispatcher.endEvent(type, source, std::nullopt); + } + }; + + bool sendEvents = true; + + virtual ~PipelineEventDispatcherInterface() = default; + virtual void setNodeId(int64_t id) = 0; + virtual void startEvent(PipelineEvent::Type type, const 
std::string& source, std::optional<size_t> queueSize = std::nullopt) = 0; + virtual void startInputEvent(const std::string& source, std::optional<size_t> queueSize = std::nullopt) = 0; + virtual void startOutputEvent(const std::string& source) = 0; + virtual void startCustomEvent(const std::string& source) = 0; + virtual void endEvent(PipelineEvent::Type type, const std::string& source, std::optional<size_t> queueSize = std::nullopt) = 0; + virtual void endInputEvent(const std::string& source, std::optional<size_t> queueSize = std::nullopt) = 0; + virtual void endOutputEvent(const std::string& source) = 0; + virtual void endCustomEvent(const std::string& source) = 0; + virtual void pingEvent(PipelineEvent::Type type, const std::string& source) = 0; + // The sequenceNum should be unique. Duration is calculated from sequenceNum - 1 to sequenceNum + virtual void pingTrackedEvent(PipelineEvent::Type type, const std::string& source, int64_t sequenceNum) = 0; + virtual void pingMainLoopEvent() = 0; + virtual void pingCustomEvent(const std::string& source) = 0; + virtual void pingInputEvent(const std::string& source, int32_t status, std::optional<size_t> queueSize = std::nullopt) = 0; + virtual BlockPipelineEvent blockEvent(PipelineEvent::Type type, const std::string& source) = 0; + virtual BlockPipelineEvent inputBlockEvent(const std::string& source = "defaultInputGroup") = 0; + virtual BlockPipelineEvent outputBlockEvent(const std::string& source = "defaultOutputGroup") = 0; + virtual BlockPipelineEvent customBlockEvent(const std::string& source) = 0; +}; + +} // namespace utility +} // namespace dai diff --git a/src/basalt/BasaltVIO.cpp b/src/basalt/BasaltVIO.cpp index ac2b30c8e8..1eda40d088 100644 --- a/src/basalt/BasaltVIO.cpp +++ b/src/basalt/BasaltVIO.cpp @@ -53,7 +53,7 @@ void BasaltVIO::run() { Eigen::Quaterniond q(R); basalt::PoseState<double>::SE3 opticalTransform(q, Eigen::Vector3d(0, 0, 0)); - while(isRunning()) { + while(mainLoop()) { if(!initialized) continue; pimpl->outStateQueue->pop(data); diff --git
a/src/pipeline/InputQueue.cpp b/src/pipeline/InputQueue.cpp index 818f418d6d..70526c9b11 100644 --- a/src/pipeline/InputQueue.cpp +++ b/src/pipeline/InputQueue.cpp @@ -14,7 +14,7 @@ InputQueue::InputQueueNode::InputQueueNode(unsigned int maxSize, bool blocking) } void InputQueue::InputQueueNode::run() { - while(isRunning()) { + while(mainLoop()) { output.send(input.get()); } } diff --git a/src/pipeline/MessageQueue.cpp b/src/pipeline/MessageQueue.cpp index 28f082f164..4e906bb71b 100644 --- a/src/pipeline/MessageQueue.cpp +++ b/src/pipeline/MessageQueue.cpp @@ -14,10 +14,15 @@ // Additions #include "spdlog/fmt/bin_to_hex.h" #include "spdlog/fmt/chrono.h" +#include "utility/PipelineEventDispatcherInterface.hpp" namespace dai { -MessageQueue::MessageQueue(std::string name, unsigned int maxSize, bool blocking) : queue(maxSize, blocking), name(std::move(name)) {} +MessageQueue::MessageQueue(std::string name, + unsigned int maxSize, + bool blocking, + std::shared_ptr pipelineEventDispatcher) + : queue(maxSize, blocking), name(std::move(name)), pipelineEventDispatcher(pipelineEventDispatcher) {} MessageQueue::MessageQueue(unsigned int maxSize, bool blocking) : queue(maxSize, blocking) {} @@ -112,7 +117,20 @@ void MessageQueue::send(const std::shared_ptr& msg) { throw QueueException(CLOSED_QUEUE_MESSAGE); } callCallbacks(msg); - auto queueNotClosed = queue.push(msg); + auto queueNotClosed = queue.push(msg, [&](LockingQueueState state, size_t size) { + if(pipelineEventDispatcher) { + switch(state) { + case LockingQueueState::BLOCKED: + pipelineEventDispatcher->pingInputEvent(name, -1, size); + break; + case LockingQueueState::CANCELLED: + pipelineEventDispatcher->pingInputEvent(name, -2, size); + break; + case LockingQueueState::OK: + break; + } + } + }); if(!queueNotClosed) throw QueueException(CLOSED_QUEUE_MESSAGE); } @@ -122,7 +140,20 @@ bool MessageQueue::send(const std::shared_ptr& msg, std::chrono::mill if(queue.isDestroyed()) { throw 
QueueException(CLOSED_QUEUE_MESSAGE); } - return queue.tryWaitAndPush(msg, timeout); + return queue.tryWaitAndPush(msg, timeout, [&](LockingQueueState state, size_t size) { + if(pipelineEventDispatcher) { + switch(state) { + case LockingQueueState::BLOCKED: + pipelineEventDispatcher->pingInputEvent(name, -1, size); + break; + case LockingQueueState::CANCELLED: + pipelineEventDispatcher->pingInputEvent(name, -2, size); + break; + case LockingQueueState::OK: + break; + } + } + }); } bool MessageQueue::trySend(const std::shared_ptr& msg) { diff --git a/src/pipeline/Node.cpp b/src/pipeline/Node.cpp index 8f6a1ac90c..49490765dc 100644 --- a/src/pipeline/Node.cpp +++ b/src/pipeline/Node.cpp @@ -241,9 +241,11 @@ void Node::Output::send(const std::shared_ptr& msg) { // } // } // } + if(pipelineEventDispatcher) pipelineEventDispatcher->startOutputEvent(getName()); for(auto& messageQueue : connectedInputs) { messageQueue->send(msg); } + if(pipelineEventDispatcher) pipelineEventDispatcher->endOutputEvent(getName()); } bool Node::Output::trySend(const std::shared_ptr& msg) { @@ -263,9 +265,11 @@ bool Node::Output::trySend(const std::shared_ptr& msg) { // } // } // } + if(pipelineEventDispatcher) pipelineEventDispatcher->startOutputEvent(getName()); for(auto& messageQueue : connectedInputs) { success &= messageQueue->trySend(msg); } + if(pipelineEventDispatcher && success) pipelineEventDispatcher->endOutputEvent(getName()); return success; } @@ -316,9 +320,10 @@ Node::OutputMap::OutputMap(Node& parent, Node::OutputDescription defaultOutput, Node::Output& Node::OutputMap::operator[](const std::string& key) { if(count({name, key}) == 0) { // Create using default and rename with group and key - Output output(parent, defaultOutput, false); - output.setGroup(name); - output.setName(key); + auto desc = defaultOutput; + desc.group = name; + desc.name = key; + Output output(parent, desc, false); insert({{name, key}, output}); } // otherwise just return reference to existing @@ -327,11 
+332,11 @@ Node::Output& Node::OutputMap::operator[](const std::string& key) { Node::Output& Node::OutputMap::operator[](std::pair groupKey) { if(count(groupKey) == 0) { // Create using default and rename with group and key - Output output(parent, defaultOutput, false); - + auto desc = defaultOutput; // Uses \t (tab) as a special character to parse out as subgroup name - output.setGroup(fmt::format("{}\t{}", name, groupKey.first)); - output.setName(groupKey.second); + desc.group = fmt::format("{}\t{}", name, groupKey.first); + desc.name = groupKey.second; + Output output(parent, desc, false); insert(std::make_pair(groupKey, output)); } // otherwise just return reference to existing @@ -348,9 +353,10 @@ Node::InputMap::InputMap(Node& parent, Node::InputDescription description) : Inp Node::Input& Node::InputMap::operator[](const std::string& key) { if(count({name, key}) == 0) { // Create using default and rename with group and key - Input input(parent, defaultInput, false); - input.setGroup(name); - input.setName(key); + auto desc = defaultInput; + desc.group = name; + desc.name = key; + Input input(parent, desc, false); insert({{name, key}, input}); } // otherwise just return reference to existing @@ -359,11 +365,11 @@ Node::Input& Node::InputMap::operator[](const std::string& key) { Node::Input& Node::InputMap::operator[](std::pair groupKey) { if(count(groupKey) == 0) { // Create using default and rename with group and key - Input input(parent, defaultInput, false); - + auto desc = defaultInput; // Uses \t (tab) as a special character to parse out as subgroup name - input.setGroup(fmt::format("{}\t{}", name, groupKey.first)); - input.setName(groupKey.second); + desc.group = fmt::format("{}\t{}", name, groupKey.first); + desc.name = groupKey.second; + Input input(parent, desc, false); insert(std::make_pair(groupKey, input)); } // otherwise just return reference to existing diff --git a/src/pipeline/Pipeline.cpp b/src/pipeline/Pipeline.cpp index 
7076d25048..9ed64bdeed 100644 --- a/src/pipeline/Pipeline.cpp +++ b/src/pipeline/Pipeline.cpp @@ -4,6 +4,7 @@ #include "depthai/device/CalibrationHandler.hpp" #include "depthai/pipeline/ThreadedHostNode.hpp" +#include "depthai/pipeline/node/internal/PipelineEventAggregation.hpp" #include "depthai/pipeline/node/internal/XLinkIn.hpp" #include "depthai/pipeline/node/internal/XLinkInHost.hpp" #include "depthai/pipeline/node/internal/XLinkOut.hpp" @@ -11,6 +12,7 @@ #include "depthai/utility/Initialization.hpp" #include "pipeline/datatype/ImgFrame.hpp" #include "pipeline/node/DetectionNetwork.hpp" +#include "pipeline/node/internal/PipelineStateMerge.hpp" #include "utility/Compression.hpp" #include "utility/Environment.hpp" #include "utility/ErrorMacros.hpp" @@ -72,6 +74,10 @@ PipelineSchema Pipeline::getPipelineSchema(SerializationType type) const { return pimpl->getPipelineSchema(type); } +PipelineSchema Pipeline::getDevicePipelineSchema(SerializationType type) const { + return pimpl->getDevicePipelineSchema(type); +} + GlobalProperties PipelineImpl::getGlobalProperties() const { return globalProperties; } @@ -115,7 +121,7 @@ std::vector> PipelineImpl::getSourceNodes() { void PipelineImpl::serialize(PipelineSchema& schema, Assets& assets, std::vector& assetStorage, SerializationType type) const { // Set schema - schema = getPipelineSchema(type); + schema = getDevicePipelineSchema(type); // Serialize all asset managers into asset storage assetStorage.clear(); @@ -186,6 +192,7 @@ std::vector PipelineImpl::getConnections() const { PipelineSchema PipelineImpl::getPipelineSchema(SerializationType type) const { PipelineSchema schema; schema.globalProperties = globalProperties; + schema.bridges = xlinkBridges; int latestIoId = 0; // Loop over all nodes, and add them to schema for(const auto& node : getAllNodes()) { @@ -193,94 +200,91 @@ PipelineSchema PipelineImpl::getPipelineSchema(SerializationType type) const { if(std::string(node->getName()) == std::string("NodeGroup") || 
std::string(node->getName()) == std::string("DeviceNodeGroup")) { continue; } - // Check if its a host node or device node - if(node->runOnHost()) { - // host node, no need to serialize to a schema - // TBD any additional changes - } else { - // Create 'node' info - NodeObjInfo info; - info.id = node->id; - info.name = node->getName(); - info.alias = node->getAlias(); - info.parentId = node->parentId; - const auto& deviceNode = std::dynamic_pointer_cast(node); - if(!deviceNode) { - throw std::invalid_argument(fmt::format("Node '{}' should subclass DeviceNode or have hostNode == true", info.name)); - } + // Create 'node' info + NodeObjInfo info; + info.id = node->id; + info.name = node->getName(); + info.alias = node->getAlias(); + info.parentId = node->parentId; + info.device = node->runOnHost() ? "host" : "device"; + const auto& deviceNode = std::dynamic_pointer_cast(node); + if(!node->runOnHost() && !deviceNode) { + throw std::invalid_argument(fmt::format("Node '{}' should subclass DeviceNode or have hostNode == true", info.name)); + } + if(deviceNode) { deviceNode->getProperties().serialize(info.properties, type); info.logLevel = deviceNode->getLogLevel(); - // Create Io information - auto inputs = node->getInputs(); - auto outputs = node->getOutputs(); - - info.ioInfo.reserve(inputs.size() + outputs.size()); - - // Add inputs - for(const auto& input : inputs) { - NodeIoInfo io; - io.id = latestIoId; - latestIoId++; - io.blocking = input.getBlocking(); - io.queueSize = input.getMaxSize(); - io.name = input.getName(); - io.group = input.getGroup(); - auto ioKey = std::make_tuple(io.group, io.name); - - io.waitForMessage = input.getWaitForMessage(); - switch(input.getType()) { - case Node::Input::Type::MReceiver: - io.type = NodeIoInfo::Type::MReceiver; - break; - case Node::Input::Type::SReceiver: - io.type = NodeIoInfo::Type::SReceiver; - break; - } + } + // Create Io information + auto inputs = node->getInputs(); + auto outputs = node->getOutputs(); + + 
info.ioInfo.reserve(inputs.size() + outputs.size()); + + // Add inputs + for(const auto& input : inputs) { + NodeIoInfo io; + io.id = latestIoId; + latestIoId++; + io.blocking = input.getBlocking(); + io.queueSize = input.getMaxSize(); + io.name = input.getName(); + io.group = input.getGroup(); + auto ioKey = std::make_tuple(io.group, io.name); + + io.waitForMessage = input.getWaitForMessage(); + switch(input.getType()) { + case Node::Input::Type::MReceiver: + io.type = NodeIoInfo::Type::MReceiver; + break; + case Node::Input::Type::SReceiver: + io.type = NodeIoInfo::Type::SReceiver; + break; + } - if(info.ioInfo.count(ioKey) > 0) { - if(io.group == "") { - throw std::invalid_argument(fmt::format("'{}.{}' redefined. Inputs and outputs must have unique names", info.name, io.name)); - } else { - throw std::invalid_argument( - fmt::format("'{}.{}[\"{}\"]' redefined. Inputs and outputs must have unique names", info.name, io.group, io.name)); - } + if(info.ioInfo.count(ioKey) > 0) { + if(io.group == "") { + throw std::invalid_argument(fmt::format("'{}.{}' redefined. Inputs and outputs must have unique names", info.name, io.name)); + } else { + throw std::invalid_argument( + fmt::format("'{}.{}[\"{}\"]' redefined. 
Inputs and outputs must have unique names", info.name, io.group, io.name)); } - info.ioInfo[ioKey] = io; } + info.ioInfo[ioKey] = io; + } - // Add outputs - for(const auto& output : outputs) { - NodeIoInfo io; - io.id = latestIoId; - latestIoId++; - io.blocking = false; - io.name = output.getName(); - io.group = output.getGroup(); - auto ioKey = std::make_tuple(io.group, io.name); - - switch(output.getType()) { - case Node::Output::Type::MSender: - io.type = NodeIoInfo::Type::MSender; - break; - case Node::Output::Type::SSender: - io.type = NodeIoInfo::Type::SSender; - break; - } + // Add outputs + for(const auto& output : outputs) { + NodeIoInfo io; + io.id = latestIoId; + latestIoId++; + io.blocking = false; + io.name = output.getName(); + io.group = output.getGroup(); + auto ioKey = std::make_tuple(io.group, io.name); + + switch(output.getType()) { + case Node::Output::Type::MSender: + io.type = NodeIoInfo::Type::MSender; + break; + case Node::Output::Type::SSender: + io.type = NodeIoInfo::Type::SSender; + break; + } - if(info.ioInfo.count(ioKey) > 0) { - if(io.group == "") { - throw std::invalid_argument(fmt::format("'{}.{}' redefined. Inputs and outputs must have unique names", info.name, io.name)); - } else { - throw std::invalid_argument( - fmt::format("'{}.{}[\"{}\"]' redefined. Inputs and outputs must have unique names", info.name, io.group, io.name)); - } + if(info.ioInfo.count(ioKey) > 0) { + if(io.group == "") { + throw std::invalid_argument(fmt::format("'{}.{}' redefined. Inputs and outputs must have unique names", info.name, io.name)); + } else { + throw std::invalid_argument( + fmt::format("'{}.{}[\"{}\"]' redefined. 
Inputs and outputs must have unique names", info.name, io.group, io.name)); } - info.ioInfo[ioKey] = io; } - - // At the end, add the constructed node information to the schema - schema.nodes[info.id] = info; + info.ioInfo[ioKey] = io; } + + // At the end, add the constructed node information to the schema + schema.nodes[info.id] = info; } // Create 'connections' info @@ -312,11 +316,6 @@ PipelineSchema PipelineImpl::getPipelineSchema(SerializationType type) const { bool outputHost = outNode->runOnHost(); bool inputHost = inNode->runOnHost(); - if(outputHost && inputHost) { - // skip - connection between host nodes doesn't have to be represented to the device - continue; - } - if(outputHost && !inputHost) { throw std::invalid_argument( fmt::format("Connection from host node '{}' to device node '{}' is not allowed during serialization.", outNode->getName(), inNode->getName())); @@ -334,6 +333,35 @@ PipelineSchema PipelineImpl::getPipelineSchema(SerializationType type) const { return schema; } +PipelineSchema PipelineImpl::getDevicePipelineSchema(SerializationType type) const { + auto schema = getPipelineSchema(type); + // Remove bridge info + schema.bridges.clear(); + // Remove host nodes + for(auto it = schema.nodes.begin(); it != schema.nodes.end();) { + if(it->second.device != "device") { + it = schema.nodes.erase(it); + } else { + ++it; + } + } + // Remove connections between host nodes (host - device connections should not exist) + schema.connections.erase(std::remove_if(schema.connections.begin(), + schema.connections.end(), + [&schema](const NodeConnectionSchema& c) { + auto node1 = schema.nodes.find(c.node1Id); + auto node2 = schema.nodes.find(c.node2Id); + if(node1 == schema.nodes.end() && node2 == schema.nodes.end()) { + return true; + } else if(node1 == schema.nodes.end() || node2 == schema.nodes.end()) { + throw std::invalid_argument("Connection from host node to device node should not exist here"); + } + return false; + }), + schema.connections.end()); 
+ return schema; +} + Device::Config PipelineImpl::getDeviceConfig() const { Device::Config config; config.board = board; @@ -477,6 +505,10 @@ bool PipelineImpl::isDeviceOnly() const { return deviceOnly; } +PipelineStateApi PipelineImpl::getPipelineState() { + return PipelineStateApi(pipelineStateOut, pipelineStateRequest, getAllNodes()); +} + void PipelineImpl::add(std::shared_ptr node) { if(node == nullptr) { throw std::invalid_argument(fmt::format("Given node pointer is null")); @@ -536,96 +568,146 @@ bool PipelineImpl::isBuilt() const { void PipelineImpl::build() { // TODO(themarpe) - add mutex and set running up ahead if(isBuild) return; - isBuild = true; - if(defaultDevice) { - auto recordPath = std::filesystem::path(utility::getEnvAs("DEPTHAI_RECORD", "")); - auto replayPath = std::filesystem::path(utility::getEnvAs("DEPTHAI_REPLAY", "")); + if(pipelineOnHost) { + if(defaultDevice) { + auto recordPath = std::filesystem::path(utility::getEnvAs("DEPTHAI_RECORD", "")); + auto replayPath = std::filesystem::path(utility::getEnvAs("DEPTHAI_REPLAY", "")); - if(defaultDevice->getDeviceInfo().platform == XLinkPlatform_t::X_LINK_MYRIAD_2 - || defaultDevice->getDeviceInfo().platform == XLinkPlatform_t::X_LINK_MYRIAD_X - || defaultDevice->getDeviceInfo().platform == XLinkPlatform_t::X_LINK_RVC4) { - try { + if(defaultDevice->getDeviceInfo().platform == XLinkPlatform_t::X_LINK_MYRIAD_2 + || defaultDevice->getDeviceInfo().platform == XLinkPlatform_t::X_LINK_MYRIAD_X + || defaultDevice->getDeviceInfo().platform == XLinkPlatform_t::X_LINK_RVC4) { + try { #ifdef DEPTHAI_MERGED_TARGET - if(enableHolisticRecordReplay) { - switch(recordConfig.state) { - case RecordConfig::RecordReplayState::RECORD: - recordPath = recordConfig.outputDir; - replayPath = ""; - break; - case RecordConfig::RecordReplayState::REPLAY: - recordPath = ""; - replayPath = recordConfig.outputDir; - break; - case RecordConfig::RecordReplayState::NONE: - enableHolisticRecordReplay = false; - break; + 
if(enableHolisticRecordReplay) { + switch(recordConfig.state) { + case RecordConfig::RecordReplayState::RECORD: + recordPath = recordConfig.outputDir; + replayPath = ""; + break; + case RecordConfig::RecordReplayState::REPLAY: + recordPath = ""; + replayPath = recordConfig.outputDir; + break; + case RecordConfig::RecordReplayState::NONE: + enableHolisticRecordReplay = false; + break; + } } - } - defaultDeviceId = defaultDevice->getDeviceId(); - - if(!recordPath.empty() && !replayPath.empty()) { - Logging::getInstance().logger.warn("Both DEPTHAI_RECORD and DEPTHAI_REPLAY are set. Record and replay disabled."); - } else if(!recordPath.empty()) { - if(enableHolisticRecordReplay || utility::checkRecordConfig(recordPath, recordConfig)) { - if(platform::checkWritePermissions(recordPath)) { - if(utility::setupHolisticRecord(parent, - defaultDeviceId, - recordConfig, - recordReplayFilenames, - defaultDevice->getDeviceInfo().platform == XLinkPlatform_t::X_LINK_MYRIAD_2 - || defaultDevice->getDeviceInfo().platform == XLinkPlatform_t::X_LINK_MYRIAD_X)) { - recordConfig.state = RecordConfig::RecordReplayState::RECORD; - Logging::getInstance().logger.info("Record enabled."); + defaultDeviceId = defaultDevice->getDeviceId(); + + if(!recordPath.empty() && !replayPath.empty()) { + Logging::getInstance().logger.warn("Both DEPTHAI_RECORD and DEPTHAI_REPLAY are set. 
Record and replay disabled."); + } else if(!recordPath.empty()) { + if(enableHolisticRecordReplay || utility::checkRecordConfig(recordPath, recordConfig)) { + if(platform::checkWritePermissions(recordPath)) { + if(utility::setupHolisticRecord(parent, + defaultDeviceId, + recordConfig, + recordReplayFilenames, + defaultDevice->getDeviceInfo().platform == XLinkPlatform_t::X_LINK_MYRIAD_2 + || defaultDevice->getDeviceInfo().platform == XLinkPlatform_t::X_LINK_MYRIAD_X)) { + recordConfig.state = RecordConfig::RecordReplayState::RECORD; + Logging::getInstance().logger.info("Record enabled."); + } else { + Logging::getInstance().logger.warn("Could not set up holistic record. Record and replay disabled."); + } } else { - Logging::getInstance().logger.warn("Could not set up holistic record. Record and replay disabled."); + Logging::getInstance().logger.warn("DEPTHAI_RECORD path does not have write permissions. Record disabled."); } } else { - Logging::getInstance().logger.warn("DEPTHAI_RECORD path does not have write permissions. Record disabled."); + Logging::getInstance().logger.warn("Could not successfully parse DEPTHAI_RECORD. Record disabled."); } - } else { - Logging::getInstance().logger.warn("Could not successfully parse DEPTHAI_RECORD. 
Record disabled."); - } - } else if(!replayPath.empty()) { - if(platform::checkPathExists(replayPath)) { - if(platform::checkWritePermissions(replayPath)) { - if(utility::setupHolisticReplay(parent, - replayPath, - defaultDeviceId, - recordConfig, - recordReplayFilenames, - defaultDevice->getDeviceInfo().platform == XLinkPlatform_t::X_LINK_MYRIAD_2 - || defaultDevice->getDeviceInfo().platform == XLinkPlatform_t::X_LINK_MYRIAD_X)) { - recordConfig.state = RecordConfig::RecordReplayState::REPLAY; - if(platform::checkPathExists(replayPath, true)) { - removeRecordReplayFiles = false; + } else if(!replayPath.empty()) { + if(platform::checkPathExists(replayPath)) { + if(platform::checkWritePermissions(replayPath)) { + if(utility::setupHolisticReplay(parent, + replayPath, + defaultDeviceId, + recordConfig, + recordReplayFilenames, + defaultDevice->getDeviceInfo().platform == XLinkPlatform_t::X_LINK_MYRIAD_2 + || defaultDevice->getDeviceInfo().platform == XLinkPlatform_t::X_LINK_MYRIAD_X)) { + recordConfig.state = RecordConfig::RecordReplayState::REPLAY; + if(platform::checkPathExists(replayPath, true)) { + removeRecordReplayFiles = false; + } + Logging::getInstance().logger.info("Replay enabled."); + } else { + Logging::getInstance().logger.warn("Could not set up holistic replay. Record and replay disabled."); } - Logging::getInstance().logger.info("Replay enabled."); } else { - Logging::getInstance().logger.warn("Could not set up holistic replay. Record and replay disabled."); + Logging::getInstance().logger.warn("DEPTHAI_REPLAY path does not have write permissions. Replay disabled."); } } else { - Logging::getInstance().logger.warn("DEPTHAI_REPLAY path does not have write permissions. Replay disabled."); + Logging::getInstance().logger.warn("DEPTHAI_REPLAY path does not exist or is invalid. Replay disabled."); } - } else { - Logging::getInstance().logger.warn("DEPTHAI_REPLAY path does not exist or is invalid. 
Replay disabled."); } - } #else - recordConfig.state = RecordConfig::RecordReplayState::NONE; - if(!recordPath.empty() || !replayPath.empty()) { - Logging::getInstance().logger.warn("Merged target is required to use holistic record/replay."); - } + recordConfig.state = RecordConfig::RecordReplayState::NONE; + if(!recordPath.empty() || !replayPath.empty()) { + Logging::getInstance().logger.warn("Merged target is required to use holistic record/replay."); + } #endif - } catch(std::runtime_error& e) { - Logging::getInstance().logger.warn("Could not set up record / replay: {}", e.what()); + } catch(std::runtime_error& e) { + Logging::getInstance().logger.warn("Could not set up record / replay: {}", e.what()); + } + } else if(enableHolisticRecordReplay || !recordPath.empty() || !replayPath.empty()) { + throw std::runtime_error("Holistic record/replay is only supported on RVC2 devices for now."); + } + } + + // Create pipeline event aggregator node and link + // Check if any nodes are on host or device + bool hasHostNodes = false; + bool hasDeviceNodes = false; + for(const auto& node : getAllNodes()) { + if(std::string(node->getName()) == std::string("NodeGroup") || std::string(node->getName()) == std::string("DeviceNodeGroup")) continue; + + if(node->runOnHost()) { + hasHostNodes = true; + } else { + hasDeviceNodes = true; } - } else if(enableHolisticRecordReplay || !recordPath.empty() || !replayPath.empty()) { - throw std::runtime_error("Holistic record/replay is only supported on RVC2 devices for now."); } + std::shared_ptr hostEventAgg = nullptr; + std::shared_ptr deviceEventAgg = nullptr; + if(hasHostNodes) { + hostEventAgg = parent.create(); + hostEventAgg->setRunOnHost(true); + } + if(hasDeviceNodes) { + deviceEventAgg = parent.create(); + deviceEventAgg->setRunOnHost(false); + } + for(auto& node : getAllNodes()) { + if(std::string(node->getName()) == std::string("NodeGroup") || std::string(node->getName()) == std::string("DeviceNodeGroup")) continue; + + auto 
threadedNode = std::dynamic_pointer_cast(node); + if(threadedNode) { + if(node->runOnHost() && hostEventAgg && node->id != hostEventAgg->id) { + threadedNode->pipelineEventOutput.link(hostEventAgg->inputs[fmt::format("{} - {}", node->getName(), node->id)]); + } else if(!node->runOnHost() && deviceEventAgg && node->id != deviceEventAgg->id) { + threadedNode->pipelineEventOutput.link(deviceEventAgg->inputs[fmt::format("{} - {}", node->getName(), node->id)]); + } + } + } + auto stateMerge = parent.create()->build(hasDeviceNodes, hasHostNodes); + if(deviceEventAgg) { + deviceEventAgg->out.link(stateMerge->inputDevice); + stateMerge->outRequest.link(deviceEventAgg->request); + } + if(hostEventAgg) { + hostEventAgg->out.link(stateMerge->inputHost); + stateMerge->outRequest.link(hostEventAgg->request); + } + pipelineStateOut = stateMerge->out.createOutputQueue(1, false); + pipelineStateRequest = stateMerge->request.createInputQueue(); } + isBuild = true; + // Go through the build stages sequentially for(const auto& node : getAllNodes()) { node->buildStage1(); @@ -703,6 +785,9 @@ void PipelineImpl::build() { xLinkBridge.xLinkInHost->setStreamName(streamName); xLinkBridge.xLinkInHost->setConnection(defaultDevice->getConnection()); connection.out->link(xLinkBridge.xLinkOut->input); + + // Note the created bridge for serialization (for visualization) + xlinkBridges.push_back({outNode->id, inNode->id}); } auto xLinkBridge = bridgesOut[connection.out]; connection.out->unlink(*connection.in); // Unlink the connection @@ -1037,4 +1122,9 @@ void Pipeline::enableHolisticReplay(const std::string& pathToRecording) { impl()->recordConfig.state = RecordConfig::RecordReplayState::REPLAY; impl()->enableHolisticRecordReplay = true; } + +PipelineStateApi Pipeline::getPipelineState() { + return impl()->getPipelineState(); +} + } // namespace dai diff --git a/src/pipeline/PipelineStateApi.cpp b/src/pipeline/PipelineStateApi.cpp new file mode 100644 index 0000000000..35beb756d9 --- /dev/null 
+++ b/src/pipeline/PipelineStateApi.cpp @@ -0,0 +1,271 @@ +#include "depthai/pipeline/PipelineStateApi.hpp" + +#include "depthai/pipeline/InputQueue.hpp" + +namespace dai { + +PipelineState NodesStateApi::summary() { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + for(auto id : nodeIds) { + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = id; + nodeCfg.events = false; + nodeCfg.inputs = {}; // Do not send any + nodeCfg.outputs = {}; // Do not send any + nodeCfg.others = {}; // Do not send any + cfg.nodes.push_back(nodeCfg); + } + + pipelineStateRequest->send(std::make_shared(cfg)); + auto state = pipelineStateOut->get(); + if(!state) throw std::runtime_error("Failed to get PipelineState"); + return *state; +} +PipelineState NodesStateApi::detailed() { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + for(auto id : nodeIds) { + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = id; + nodeCfg.events = false; + cfg.nodes.push_back(nodeCfg); + } + + pipelineStateRequest->send(std::make_shared(cfg)); + auto state = pipelineStateOut->get(); + if(!state) throw std::runtime_error("Failed to get PipelineState"); + return *state; +} +std::unordered_map> NodesStateApi::outputs() { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + for(auto id : nodeIds) { + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = id; + nodeCfg.inputs = {}; // Do not send any + nodeCfg.others = {}; // Do not send any + nodeCfg.events = false; + cfg.nodes.push_back(nodeCfg); + } + + pipelineStateRequest->send(std::make_shared(cfg)); + auto state = pipelineStateOut->get(); + if(!state) throw std::runtime_error("Failed to get PipelineState"); + std::unordered_map> result; + for(auto& [nodeId, nodeState] : state->nodeStates) { + result[nodeId] = nodeState.outputStates; + } + return result; +} 
+std::unordered_map> NodesStateApi::inputs() { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + for(auto id : nodeIds) { + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = id; + nodeCfg.outputs = {}; // Do not send any + nodeCfg.others = {}; // Do not send any + nodeCfg.events = false; + cfg.nodes.push_back(nodeCfg); + } + + pipelineStateRequest->send(std::make_shared(cfg)); + auto state = pipelineStateOut->get(); + if(!state) throw std::runtime_error("Failed to get PipelineState"); + std::unordered_map> result; + for(auto& [nodeId, nodeState] : state->nodeStates) { + result[nodeId] = nodeState.inputStates; + } + return result; +} +std::unordered_map> NodesStateApi::otherTimings() { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + for(auto id : nodeIds) { + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = id; + nodeCfg.inputs = {}; // Do not send any + nodeCfg.outputs = {}; // Do not send any + nodeCfg.events = false; + cfg.nodes.push_back(nodeCfg); + } + + pipelineStateRequest->send(std::make_shared(cfg)); + auto state = pipelineStateOut->get(); + if(!state) throw std::runtime_error("Failed to get PipelineState"); + std::unordered_map> result; + for(auto& [nodeId, nodeState] : state->nodeStates) { + result[nodeId] = nodeState.otherTimings; + } + return result; +} + +std::unordered_map NodeStateApi::outputs(const std::vector& outputNames) { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = nodeId; + nodeCfg.outputs = outputNames; + nodeCfg.inputs = {}; // Do not send any + nodeCfg.others = {}; // Do not send any + nodeCfg.events = false; + cfg.nodes.push_back(nodeCfg); + + pipelineStateRequest->send(std::make_shared(cfg)); + auto state = pipelineStateOut->get(); + if(!state) throw 
std::runtime_error("Failed to get PipelineState"); + if(state->nodeStates.find(nodeId) == state->nodeStates.end()) { + throw std::runtime_error("Node ID " + std::to_string(nodeId) + " not found in PipelineState"); + } + std::unordered_map result; + for(const auto& outputName : outputNames) { + result[outputName] = state->nodeStates[nodeId].outputStates[outputName]; + } + return result; +} +NodeState::OutputQueueState NodeStateApi::outputs(const std::string& outputName) { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = nodeId; + nodeCfg.outputs = {outputName}; + nodeCfg.inputs = {}; // Do not send any + nodeCfg.others = {}; // Do not send any + nodeCfg.events = false; + cfg.nodes.push_back(nodeCfg); + + pipelineStateRequest->send(std::make_shared(cfg)); + auto state = pipelineStateOut->get(); + if(!state) throw std::runtime_error("Failed to get PipelineState"); + if(state->nodeStates.find(nodeId) == state->nodeStates.end()) { + throw std::runtime_error("Node ID " + std::to_string(nodeId) + " not found in PipelineState"); + } + if(state->nodeStates[nodeId].outputStates.find(outputName) == state->nodeStates[nodeId].outputStates.end()) { + throw std::runtime_error("Output name " + outputName + " not found in NodeState for node ID " + std::to_string(nodeId)); + } + return state->nodeStates[nodeId].outputStates[outputName]; +} +std::vector NodeStateApi::events() { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = nodeId; + nodeCfg.outputs = {}; // Do not send any + nodeCfg.inputs = {}; // Do not send any + nodeCfg.others = {}; // Do not send any + nodeCfg.events = true; + cfg.nodes.push_back(nodeCfg); + + pipelineStateRequest->send(std::make_shared(cfg)); + auto state = pipelineStateOut->get(); + if(!state) throw 
std::runtime_error("Failed to get PipelineState"); + if(state->nodeStates.find(nodeId) == state->nodeStates.end()) { + throw std::runtime_error("Node ID " + std::to_string(nodeId) + " not found in PipelineState"); + } + return state->nodeStates[nodeId].events; +} +std::unordered_map NodeStateApi::inputs(const std::vector& inputNames) { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = nodeId; + nodeCfg.inputs = inputNames; + nodeCfg.outputs = {}; // Do not send any + nodeCfg.others = {}; // Do not send any + nodeCfg.events = false; + cfg.nodes.push_back(nodeCfg); + + pipelineStateRequest->send(std::make_shared(cfg)); + auto state = pipelineStateOut->get(); + if(!state) throw std::runtime_error("Failed to get PipelineState"); + if(state->nodeStates.find(nodeId) == state->nodeStates.end()) { + throw std::runtime_error("Node ID " + std::to_string(nodeId) + " not found in PipelineState"); + } + std::unordered_map result; + for(const auto& inputName : inputNames) { + result[inputName] = state->nodeStates[nodeId].inputStates[inputName]; + } + return result; +} +NodeState::InputQueueState NodeStateApi::inputs(const std::string& inputName) { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = nodeId; + nodeCfg.inputs = {inputName}; + nodeCfg.outputs = {}; // Do not send any + nodeCfg.others = {}; // Do not send any + nodeCfg.events = false; + cfg.nodes.push_back(nodeCfg); + + pipelineStateRequest->send(std::make_shared(cfg)); + auto state = pipelineStateOut->get(); + if(!state) throw std::runtime_error("Failed to get PipelineState"); + if(state->nodeStates.find(nodeId) == state->nodeStates.end()) { + throw std::runtime_error("Node ID " + std::to_string(nodeId) + " not found in PipelineState"); + } + 
if(state->nodeStates[nodeId].inputStates.find(inputName) == state->nodeStates[nodeId].inputStates.end()) { + throw std::runtime_error("Input name " + inputName + " not found in NodeState for node ID " + std::to_string(nodeId)); + } + return state->nodeStates[nodeId].inputStates[inputName]; +} +std::unordered_map NodeStateApi::otherTimings(const std::vector& statNames) { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = nodeId; + nodeCfg.others = statNames; + nodeCfg.outputs = {}; // Do not send any + nodeCfg.inputs = {}; // Do not send any + nodeCfg.events = false; + cfg.nodes.push_back(nodeCfg); + + pipelineStateRequest->send(std::make_shared(cfg)); + auto state = pipelineStateOut->get(); + if(!state) throw std::runtime_error("Failed to get PipelineState"); + if(state->nodeStates.find(nodeId) == state->nodeStates.end()) { + throw std::runtime_error("Node ID " + std::to_string(nodeId) + " not found in PipelineState"); + } + std::unordered_map result; + for(const auto& otherName : statNames) { + result[otherName] = state->nodeStates[nodeId].otherTimings[otherName]; + } + return result; +} +NodeState::Timing NodeStateApi::otherStats(const std::string& statName) { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = nodeId; + nodeCfg.others = {statName}; + nodeCfg.outputs = {}; // Do not send any + nodeCfg.inputs = {}; // Do not send any + nodeCfg.events = false; + cfg.nodes.push_back(nodeCfg); + + pipelineStateRequest->send(std::make_shared(cfg)); + auto state = pipelineStateOut->get(); + if(!state) throw std::runtime_error("Failed to get PipelineState"); + if(state->nodeStates.find(nodeId) == state->nodeStates.end()) { + throw std::runtime_error("Node ID " + std::to_string(nodeId) + " not found in PipelineState"); + } + 
if(state->nodeStates[nodeId].otherTimings.find(statName) == state->nodeStates[nodeId].otherTimings.end()) { + throw std::runtime_error("Stat name " + statName + " not found in NodeState for node ID " + std::to_string(nodeId)); + } + return state->nodeStates[nodeId].otherTimings[statName]; +} + +} // namespace dai diff --git a/src/pipeline/ThreadedNode.cpp b/src/pipeline/ThreadedNode.cpp index bd46872523..b86ad8783b 100644 --- a/src/pipeline/ThreadedNode.cpp +++ b/src/pipeline/ThreadedNode.cpp @@ -2,6 +2,9 @@ #include +#include + +#include "depthai/utility/PipelineEventDispatcher.hpp" #include "pipeline/ThreadedNodeImpl.hpp" #include "utility/Environment.hpp" #include "utility/ErrorMacros.hpp" @@ -18,11 +21,17 @@ ThreadedNode::ThreadedNode() { level = Logging::parseLevel(envLevel); } pimpl->logger->set_level(level); + pipelineEventDispatcher = std::make_shared(&pipelineEventOutput); +} + +void ThreadedNode::initPipelineEventDispatcher(int64_t nodeId) { + pipelineEventDispatcher->setNodeId(nodeId); } ThreadedNode::~ThreadedNode() = default; void ThreadedNode::start() { + initPipelineEventDispatcher(this->id); // A node should not be started if it is already running // We would be creating multiple threads for the same node DAI_CHECK_V(!isRunning(), "Node with id {} is already running. Cannot start it again. 
Node name: {}", id, getName()); @@ -86,4 +95,21 @@ bool ThreadedNode::isRunning() const { return running; } +bool ThreadedNode::mainLoop() { + this->pipelineEventDispatcher->pingMainLoopEvent(); + return isRunning(); +} + +utility::PipelineEventDispatcherInterface::BlockPipelineEvent ThreadedNode::blockEvent(PipelineEvent::Type type, const std::string& source) { + return pipelineEventDispatcher->blockEvent(type, source); +} +utility::PipelineEventDispatcherInterface::BlockPipelineEvent ThreadedNode::inputBlockEvent(const std::string& source) { + // Just for convenience due to the default source + return blockEvent(PipelineEvent::Type::INPUT_BLOCK, source); +} +utility::PipelineEventDispatcherInterface::BlockPipelineEvent ThreadedNode::outputBlockEvent(const std::string& source) { + // Just for convenience due to the default source + return blockEvent(PipelineEvent::Type::OUTPUT_BLOCK, source); +} + } // namespace dai diff --git a/src/pipeline/datatype/DatatypeEnum.cpp b/src/pipeline/datatype/DatatypeEnum.cpp index 32ec647eb0..2d3a3e8ffc 100644 --- a/src/pipeline/datatype/DatatypeEnum.cpp +++ b/src/pipeline/datatype/DatatypeEnum.cpp @@ -36,6 +36,9 @@ const std::unordered_map> hierarchy = { DatatypeEnum::AprilTags, DatatypeEnum::BenchmarkReport, DatatypeEnum::MessageGroup, + DatatypeEnum::PipelineEvent, + DatatypeEnum::PipelineState, + DatatypeEnum::PipelineEventAggregationConfig, DatatypeEnum::PointCloudConfig, DatatypeEnum::PointCloudData, DatatypeEnum::RGBDData, @@ -73,6 +76,9 @@ const std::unordered_map> hierarchy = { DatatypeEnum::AprilTags, DatatypeEnum::BenchmarkReport, DatatypeEnum::MessageGroup, + DatatypeEnum::PipelineEvent, + DatatypeEnum::PipelineState, + DatatypeEnum::PipelineEventAggregationConfig, DatatypeEnum::PointCloudConfig, DatatypeEnum::PointCloudData, DatatypeEnum::RGBDData, @@ -108,6 +114,9 @@ const std::unordered_map> hierarchy = { {DatatypeEnum::AprilTags, {}}, {DatatypeEnum::BenchmarkReport, {}}, {DatatypeEnum::MessageGroup, {}}, + 
{DatatypeEnum::PipelineEvent, {}}, + {DatatypeEnum::PipelineState, {}}, + {DatatypeEnum::PipelineEventAggregationConfig, {}}, {DatatypeEnum::PointCloudConfig, {}}, {DatatypeEnum::PointCloudData, {}}, {DatatypeEnum::RGBDData, {}}, diff --git a/src/pipeline/datatype/PipelineEvent.cpp b/src/pipeline/datatype/PipelineEvent.cpp new file mode 100644 index 0000000000..4d71213361 --- /dev/null +++ b/src/pipeline/datatype/PipelineEvent.cpp @@ -0,0 +1,4 @@ +#include "depthai/pipeline/datatype/PipelineEvent.hpp" + +namespace dai { +} // namespace dai diff --git a/src/pipeline/datatype/PipelineEventAggregationConfig.cpp b/src/pipeline/datatype/PipelineEventAggregationConfig.cpp new file mode 100644 index 0000000000..8ef38e1ba1 --- /dev/null +++ b/src/pipeline/datatype/PipelineEventAggregationConfig.cpp @@ -0,0 +1,12 @@ +#include "depthai/pipeline/datatype/PipelineEventAggregationConfig.hpp" + +namespace dai { + +PipelineEventAggregationConfig::~PipelineEventAggregationConfig() = default; + +void PipelineEventAggregationConfig::serialize(std::vector& metadata, DatatypeEnum& datatype) const { + metadata = utility::serialize(*this); + datatype = DatatypeEnum::PipelineEventAggregationConfig; +} + +} // namespace dai diff --git a/src/pipeline/datatype/PipelineState.cpp b/src/pipeline/datatype/PipelineState.cpp new file mode 100644 index 0000000000..f45bb47e95 --- /dev/null +++ b/src/pipeline/datatype/PipelineState.cpp @@ -0,0 +1,4 @@ +#include "depthai/pipeline/datatype/PipelineState.hpp" + +namespace dai { +} // namespace dai diff --git a/src/pipeline/datatype/StreamMessageParser.cpp b/src/pipeline/datatype/StreamMessageParser.cpp index 660afa8329..f25884b02d 100644 --- a/src/pipeline/datatype/StreamMessageParser.cpp +++ b/src/pipeline/datatype/StreamMessageParser.cpp @@ -15,6 +15,9 @@ #include "depthai/pipeline/datatype/BenchmarkReport.hpp" #include "depthai/pipeline/datatype/Buffer.hpp" #include "depthai/pipeline/datatype/CameraControl.hpp" +#include 
"depthai/pipeline/datatype/PipelineEvent.hpp" +#include "depthai/pipeline/datatype/PipelineEventAggregationConfig.hpp" +#include "depthai/pipeline/datatype/PipelineState.hpp" #ifdef DEPTHAI_HAVE_DYNAMIC_CALIBRATION_SUPPORT #include "depthai/pipeline/datatype/DynamicCalibrationControl.hpp" #include "depthai/pipeline/datatype/DynamicCalibrationResults.hpp" @@ -238,6 +241,15 @@ std::shared_ptr StreamMessageParser::parseMessage(streamPacketDesc_t* case DatatypeEnum::PointCloudData: return parseDatatype(metadataStart, serializedObjectSize, data, fd); break; + case DatatypeEnum::PipelineEvent: + return parseDatatype(metadataStart, serializedObjectSize, data, fd); + break; + case DatatypeEnum::PipelineState: + return parseDatatype(metadataStart, serializedObjectSize, data, fd); + break; + case DatatypeEnum::PipelineEventAggregationConfig: + return parseDatatype(metadataStart, serializedObjectSize, data, fd); + break; case DatatypeEnum::MessageGroup: return parseDatatype(metadataStart, serializedObjectSize, data, fd); break; diff --git a/src/pipeline/node/AprilTag.cpp b/src/pipeline/node/AprilTag.cpp index 7fe371e2a7..765af8d429 100644 --- a/src/pipeline/node/AprilTag.cpp +++ b/src/pipeline/node/AprilTag.cpp @@ -206,7 +206,7 @@ void AprilTag::run() { // Handle possible errors during configuration handleErrors(errno); - while(isRunning()) { + while(mainLoop()) { // Retrieve config from user if available if(properties.inputConfigSync) { inConfig = inputConfig.get(); diff --git a/src/pipeline/node/BenchmarkIn.cpp b/src/pipeline/node/BenchmarkIn.cpp index 539f748a4f..634a0153dc 100644 --- a/src/pipeline/node/BenchmarkIn.cpp +++ b/src/pipeline/node/BenchmarkIn.cpp @@ -55,7 +55,7 @@ void BenchmarkIn::run() { auto start = steady_clock::now(); - while(isRunning()) { + while(mainLoop()) { auto inMessage = input.get(); // If this is the first message of the batch, reset counters diff --git a/src/pipeline/node/DynamicCalibrationNode.cpp b/src/pipeline/node/DynamicCalibrationNode.cpp 
index cd32d0f910..c9da5c484c 100644 --- a/src/pipeline/node/DynamicCalibrationNode.cpp +++ b/src/pipeline/node/DynamicCalibrationNode.cpp @@ -557,7 +557,7 @@ void DynamicCalibration::run() { auto previousLoadingTimeFloat = std::chrono::steady_clock::now() + std::chrono::duration(calibrationPeriod); auto previousLoadingTime = std::chrono::time_point_cast(previousLoadingTimeFloat); initializePipeline(device); - while(isRunning()) { + while(mainLoop()) { slept = false; doWork(previousLoadingTime); if(!slept) { diff --git a/src/pipeline/node/ImageAlign.cpp b/src/pipeline/node/ImageAlign.cpp index 794001de84..d1da2a9602 100644 --- a/src/pipeline/node/ImageAlign.cpp +++ b/src/pipeline/node/ImageAlign.cpp @@ -353,7 +353,7 @@ void ImageAlign::run() { ImgFrame inputAlignToImgFrame; uint32_t currentEepromId = getParentPipeline().getEepromId(); - while(isRunning()) { + while(mainLoop()) { auto inputImg = input.get(); if(!initialized) { @@ -585,4 +585,4 @@ void ImageAlign::run() { #endif // DEPTHAI_HAVE_OPENCV_SUPPORT } // namespace node -} // namespace dai \ No newline at end of file +} // namespace dai diff --git a/src/pipeline/node/ImageFilters.cpp b/src/pipeline/node/ImageFilters.cpp index 5c57572015..b474d6284b 100644 --- a/src/pipeline/node/ImageFilters.cpp +++ b/src/pipeline/node/ImageFilters.cpp @@ -818,7 +818,7 @@ void ImageFilters::run() { filters.size() == 0, getFilterPipelineString()); - while(isRunning()) { + while(mainLoop()) { // Set config while(inputConfig.has()) { auto configMsg = inputConfig.get(); @@ -946,7 +946,7 @@ void ToFDepthConfidenceFilter::applyDepthConfidenceFilter(std::shared_ptr(); diff --git a/src/pipeline/node/ObjectTracker.cpp b/src/pipeline/node/ObjectTracker.cpp index 1756c40e78..fd2ee507f3 100644 --- a/src/pipeline/node/ObjectTracker.cpp +++ b/src/pipeline/node/ObjectTracker.cpp @@ -98,7 +98,7 @@ void ObjectTracker::run() { impl::OCSTracker tracker(properties); - while(isRunning()) { + while(mainLoop()) { std::shared_ptr inputTrackerImg; 
std::shared_ptr inputDetectionImg; std::shared_ptr inputImgDetections; diff --git a/src/pipeline/node/Sync.cpp b/src/pipeline/node/Sync.cpp index 5d00489698..c22f82af1d 100644 --- a/src/pipeline/node/Sync.cpp +++ b/src/pipeline/node/Sync.cpp @@ -50,7 +50,7 @@ void Sync::run() { auto syncThresholdNs = properties.syncThresholdNs; logger->trace("Sync threshold: {}", syncThresholdNs); - while(isRunning()) { + while(mainLoop()) { auto tAbsoluteBeginning = steady_clock::now(); std::unordered_map> inputFrames; for(auto name : inputNames) { diff --git a/src/pipeline/node/host/Display.cpp b/src/pipeline/node/host/Display.cpp index 257a4fc8fc..1e7488a88c 100644 --- a/src/pipeline/node/host/Display.cpp +++ b/src/pipeline/node/host/Display.cpp @@ -37,7 +37,7 @@ Display::Display(std::string name) : name(std::move(name)) {} void Display::run() { auto fpsCounter = FPSCounter(); - while(isRunning()) { + while(mainLoop()) { std::shared_ptr imgFrame = input.get(); if(imgFrame != nullptr) { fpsCounter.update(); diff --git a/src/pipeline/node/host/HostCamera.cpp b/src/pipeline/node/host/HostCamera.cpp index 7b5cfac4cd..f698f9d26e 100644 --- a/src/pipeline/node/host/HostCamera.cpp +++ b/src/pipeline/node/host/HostCamera.cpp @@ -13,7 +13,7 @@ void HostCamera::run() { throw std::runtime_error("Couldn't open camera"); } int64_t seqNum = 0; - while(isRunning()) { + while(mainLoop()) { cv::Mat frame; auto success = cap.read(frame); if(frame.empty() || !success) { @@ -36,4 +36,4 @@ void HostCamera::run() { } } // namespace node -} // namespace dai \ No newline at end of file +} // namespace dai diff --git a/src/pipeline/node/host/HostNode.cpp b/src/pipeline/node/host/HostNode.cpp index 885a3a5336..e5d6522b35 100644 --- a/src/pipeline/node/host/HostNode.cpp +++ b/src/pipeline/node/host/HostNode.cpp @@ -17,7 +17,7 @@ void HostNode::buildStage1() { } void HostNode::run() { - while(isRunning()) { + while(mainLoop()) { // Get input auto in = input.get(); // Create a lambda that captures the class 
as a shared pointer and the message diff --git a/src/pipeline/node/host/RGBD.cpp b/src/pipeline/node/host/RGBD.cpp index 92fd67d844..5f964b1b98 100644 --- a/src/pipeline/node/host/RGBD.cpp +++ b/src/pipeline/node/host/RGBD.cpp @@ -363,7 +363,7 @@ void RGBD::initialize(std::shared_ptr frames) { } void RGBD::run() { - while(isRunning()) { + while(mainLoop()) { if(!pcl.getQueueConnections().empty() || !pcl.getConnections().empty() || !rgbd.getQueueConnections().empty() || !rgbd.getConnections().empty()) { // Get the color and depth frames auto group = inSync.get(); diff --git a/src/pipeline/node/host/Record.cpp b/src/pipeline/node/host/Record.cpp index 4604d2bf7f..36f6316d53 100644 --- a/src/pipeline/node/host/Record.cpp +++ b/src/pipeline/node/host/Record.cpp @@ -53,7 +53,7 @@ void RecordVideo::run() { unsigned int i = 0; auto start = std::chrono::steady_clock::now(); auto end = std::chrono::steady_clock::now(); - while(isRunning()) { + while(mainLoop()) { auto msg = input.get(); if(msg == nullptr) continue; if(streamType == DatatypeEnum::ADatatype) { @@ -150,7 +150,7 @@ void RecordMetadataOnly::run() { utility::ByteRecorder byteRecorder; DatatypeEnum streamType = DatatypeEnum::ADatatype; - while(isRunning()) { + while(mainLoop()) { auto msg = input.get(); if(msg == nullptr) continue; if(streamType == DatatypeEnum::ADatatype) { diff --git a/src/pipeline/node/host/Replay.cpp b/src/pipeline/node/host/Replay.cpp index 49e8b5b0b0..f35f5dfb98 100644 --- a/src/pipeline/node/host/Replay.cpp +++ b/src/pipeline/node/host/Replay.cpp @@ -97,6 +97,9 @@ inline std::shared_ptr getMessage(const std::shared_ptr getProtoMessage(utility::ByteP case DatatypeEnum::DynamicCalibrationResult: case DatatypeEnum::CalibrationQuality: case DatatypeEnum::CoverageData: + case DatatypeEnum::PipelineEvent: + case DatatypeEnum::PipelineState: + case DatatypeEnum::PipelineEventAggregationConfig: throw std::runtime_error("Cannot replay message type: " + std::to_string((int)datatype)); } return {}; @@ 
-208,7 +214,7 @@ void ReplayVideo::run() { uint64_t index = 0; auto loopStart = std::chrono::steady_clock::now(); auto prevMsgTs = loopStart; - while(isRunning()) { + while(mainLoop()) { std::shared_ptr metadata; std::vector frame; if(hasMetadata) { @@ -335,7 +341,7 @@ void ReplayMetadataOnly::run() { bool first = true; auto loopStart = std::chrono::steady_clock::now(); auto prevMsgTs = loopStart; - while(isRunning()) { + while(mainLoop()) { std::shared_ptr metadata; std::vector frame; if(!utility::deserializationSupported(datatype)) { diff --git a/src/pipeline/node/internal/PipelineEventAggregation.cpp b/src/pipeline/node/internal/PipelineEventAggregation.cpp new file mode 100644 index 0000000000..31243a82ce --- /dev/null +++ b/src/pipeline/node/internal/PipelineEventAggregation.cpp @@ -0,0 +1,524 @@ +#include "depthai/pipeline/node/internal/PipelineEventAggregation.hpp" + +#include +#include + +#include "depthai/pipeline/datatype/PipelineEvent.hpp" +#include "depthai/pipeline/datatype/PipelineEventAggregationConfig.hpp" +#include "depthai/pipeline/datatype/PipelineState.hpp" +#include "depthai/utility/CircularBuffer.hpp" +#include "pipeline/ThreadedNodeImpl.hpp" + +namespace dai { +namespace node { +namespace internal { + +class NodeEventAggregation { + private: + struct FpsMeasurement { + std::chrono::steady_clock::time_point time; + int64_t sequenceNum; + }; + + std::shared_ptr logger; + + uint32_t windowSize; + uint32_t eventBatchSize; + uint32_t eventWaitWindow; + + public: + NodeEventAggregation(uint32_t windowSize, uint32_t eventBatchSize, uint32_t eventWaitWindow, std::shared_ptr logger) + : logger(logger), windowSize(windowSize), eventBatchSize(eventBatchSize), eventWaitWindow(eventWaitWindow), eventsBuffer(windowSize) {} + NodeState state; + utility::CircularBuffer eventsBuffer; + std::unordered_map>> inputTimingsBuffers; + std::unordered_map>> outputTimingsBuffers; + std::unique_ptr> inputsGetTimingsBuffer; + std::unique_ptr> outputsSendTimingsBuffer; + 
std::unique_ptr> mainLoopTimingsBuffer; + std::unordered_map>> otherTimingsBuffers; + + std::unordered_map>> inputQueueSizesBuffers; + + std::unordered_map>> inputFpsBuffers; + std::unordered_map>> outputFpsBuffers; + std::unique_ptr> inputsGetFpsBuffer; + std::unique_ptr> outputsSendFpsBuffer; + std::unordered_map>> otherFpsBuffers; + + std::unordered_map>>> ongoingInputEvents; + std::unordered_map>>> ongoingOutputEvents; + std::unique_ptr>> ongoingGetInputsEvents; + std::unique_ptr>> ongoingSendOutputsEvents; + std::unique_ptr>> ongoingMainLoopEvents; + std::unordered_map>>> ongoingOtherEvents; + + uint32_t count = 0; + + private: + inline std::optional* findOngoingEvent(uint32_t sequenceNum, utility::CircularBuffer>& buffer) { + for(auto rit = buffer.rbegin(); rit != buffer.rend(); ++rit) { + if(rit->has_value() && rit->value().sequenceNum == sequenceNum) { + return &(*rit); + } else if(rit->has_value() && rit->value().sequenceNum < sequenceNum) { + break; + } + } + return nullptr; + } + + inline bool updateIntervalBuffers(PipelineEvent& event) { + using namespace std::chrono; + std::unique_ptr> emptyIntBuffer; + std::unique_ptr> emptyTimeBuffer; + + auto& ongoingEvents = [&]() -> std::unique_ptr>>& { + switch(event.type) { + case PipelineEvent::Type::LOOP: + throw std::runtime_error("LOOP event should not be an interval"); + case PipelineEvent::Type::INPUT: + return ongoingInputEvents[event.source]; + case PipelineEvent::Type::OUTPUT: + return ongoingOutputEvents[event.source]; + case PipelineEvent::Type::CUSTOM: + return ongoingOtherEvents[event.source]; + case PipelineEvent::Type::INPUT_BLOCK: + return ongoingGetInputsEvents; + case PipelineEvent::Type::OUTPUT_BLOCK: + return ongoingSendOutputsEvents; + } + return ongoingMainLoopEvents; // To silence compiler warning + }(); + auto& timingsBuffer = [&]() -> std::unique_ptr>& { + switch(event.type) { + case PipelineEvent::Type::LOOP: + throw std::runtime_error("LOOP event should not be an interval"); + case 
PipelineEvent::Type::INPUT: + return inputTimingsBuffers[event.source]; + case PipelineEvent::Type::OUTPUT: + return outputTimingsBuffers[event.source]; + case PipelineEvent::Type::CUSTOM: + return otherTimingsBuffers[event.source]; + case PipelineEvent::Type::INPUT_BLOCK: + return inputsGetTimingsBuffer; + case PipelineEvent::Type::OUTPUT_BLOCK: + return outputsSendTimingsBuffer; + } + return emptyIntBuffer; // To silence compiler warning + }(); + auto& fpsBuffer = [&]() -> std::unique_ptr>& { + switch(event.type) { + case PipelineEvent::Type::LOOP: + throw std::runtime_error("LOOP event should not be an interval"); + case PipelineEvent::Type::INPUT: + return inputFpsBuffers[event.source]; + case PipelineEvent::Type::OUTPUT: + return outputFpsBuffers[event.source]; + case PipelineEvent::Type::CUSTOM: + return otherFpsBuffers[event.source]; + case PipelineEvent::Type::INPUT_BLOCK: + return inputsGetFpsBuffer; + case PipelineEvent::Type::OUTPUT_BLOCK: + return outputsSendFpsBuffer; + } + return emptyTimeBuffer; // To silence compiler warning + }(); + + if(ongoingEvents == nullptr) ongoingEvents = std::make_unique>>(eventWaitWindow); + if(timingsBuffer == nullptr) timingsBuffer = std::make_unique>(windowSize); + if(fpsBuffer == nullptr) fpsBuffer = std::make_unique>(windowSize); + + auto* ongoingEvent = findOngoingEvent(event.sequenceNum, *ongoingEvents); + + if(ongoingEvent && ongoingEvent->has_value() && event.interval == PipelineEvent::Interval::END) { + // End event + NodeState::DurationEvent durationEvent; + durationEvent.startEvent = ongoingEvent->value(); + durationEvent.durationUs = duration_cast(event.getTimestamp() - ongoingEvent->value().getTimestamp()).count(); + eventsBuffer.add(durationEvent); + + timingsBuffer->add(durationEvent.durationUs); + fpsBuffer->add({durationEvent.startEvent.getTimestamp(), durationEvent.startEvent.getSequenceNum()}); + + *ongoingEvent = std::nullopt; + + return true; + } else if(event.interval == 
PipelineEvent::Interval::START) { + // Start event + ongoingEvents->add(event); + } + return false; + } + + inline bool updatePingBuffers(PipelineEvent& event) { + using namespace std::chrono; + std::unique_ptr> emptyIntBuffer; + std::unique_ptr> emptyTimeBuffer; + + auto& ongoingEvents = [&]() -> std::unique_ptr>>& { + switch(event.type) { + case PipelineEvent::Type::LOOP: + return ongoingMainLoopEvents; + case PipelineEvent::Type::CUSTOM: + return ongoingOtherEvents[event.source]; + case PipelineEvent::Type::INPUT: + case PipelineEvent::Type::OUTPUT: + case PipelineEvent::Type::INPUT_BLOCK: + case PipelineEvent::Type::OUTPUT_BLOCK: + throw std::runtime_error("INPUT and OUTPUT events should not be pings"); + } + return ongoingMainLoopEvents; // To silence compiler warning + }(); + auto& timingsBuffer = [&]() -> std::unique_ptr>& { + switch(event.type) { + case PipelineEvent::Type::LOOP: + return mainLoopTimingsBuffer; + case PipelineEvent::Type::CUSTOM: + return otherTimingsBuffers[event.source]; + case PipelineEvent::Type::INPUT: + case PipelineEvent::Type::OUTPUT: + case PipelineEvent::Type::INPUT_BLOCK: + case PipelineEvent::Type::OUTPUT_BLOCK: + throw std::runtime_error("INPUT and OUTPUT events should not be pings"); + } + return emptyIntBuffer; // To silence compiler warning + }(); + auto& fpsBuffer = [&]() -> std::unique_ptr>& { + switch(event.type) { + case PipelineEvent::Type::LOOP: + break; + case PipelineEvent::Type::CUSTOM: + return otherFpsBuffers[event.source]; + case PipelineEvent::Type::INPUT: + case PipelineEvent::Type::OUTPUT: + case PipelineEvent::Type::INPUT_BLOCK: + case PipelineEvent::Type::OUTPUT_BLOCK: + throw std::runtime_error("INPUT and OUTPUT events should not be pings"); + } + return emptyTimeBuffer; // To silence compiler warning + }(); + + if(ongoingEvents == nullptr) ongoingEvents = std::make_unique>>(eventWaitWindow); + if(timingsBuffer == nullptr) timingsBuffer = std::make_unique>(windowSize); + if(fpsBuffer == nullptr) fpsBuffer = 
std::make_unique>(windowSize); + + auto* ongoingEvent = findOngoingEvent(event.sequenceNum - 1, *ongoingEvents); + + if(ongoingEvent && ongoingEvent->has_value()) { + // End event + NodeState::DurationEvent durationEvent; + durationEvent.startEvent = ongoingEvent->value(); + durationEvent.durationUs = duration_cast(event.getTimestamp() - ongoingEvent->value().getTimestamp()).count(); + eventsBuffer.add(durationEvent); + + timingsBuffer->add(durationEvent.durationUs); + if(fpsBuffer) fpsBuffer->add({durationEvent.startEvent.getTimestamp(), durationEvent.startEvent.getSequenceNum()}); + + // Start event + ongoingEvents->add(event); + + return true; + } + // Start event + ongoingEvents->add(event); + + return false; + } + + inline void updateTimingStats(NodeState::TimingStats& stats, const utility::CircularBuffer& buffer) { + stats.minMicros = std::min(stats.minMicros, buffer.last()); + stats.maxMicros = std::max(stats.maxMicros, buffer.last()); + stats.averageMicrosRecent = 0; + stats.stdDevMicrosRecent = 0; + + auto bufferByType = buffer.getBuffer(); + uint64_t sum = 0; + double variance = 0; + for(auto v : bufferByType) { + sum += v; + } + stats.averageMicrosRecent = sum / bufferByType.size(); + // Calculate standard deviation + for(auto v : bufferByType) { + auto diff = v - stats.averageMicrosRecent; + variance += diff * diff; + } + variance /= bufferByType.size(); + stats.stdDevMicrosRecent = (uint64_t)(std::sqrt(variance)); + + std::sort(bufferByType.begin(), bufferByType.end()); + stats.minMicrosRecent = bufferByType.front(); + stats.maxMicrosRecent = bufferByType.back(); + stats.medianMicrosRecent = bufferByType[bufferByType.size() / 2]; + if(bufferByType.size() % 2 == 0) { + stats.medianMicrosRecent = (stats.medianMicrosRecent + bufferByType[bufferByType.size() / 2 - 1]) / 2; + } + } + inline void updateFpsStats(NodeState::Timing& timing, const utility::CircularBuffer& buffer) { + if(buffer.size() < 2) return; + auto timeDiff = 
std::chrono::duration_cast(buffer.last().time - buffer.first().time).count(); + auto frameDiff = buffer.last().sequenceNum - buffer.first().sequenceNum; + if(timeDiff > 0 && buffer.last().sequenceNum > buffer.first().sequenceNum) { + timing.fps = frameDiff * (1e6f / (float)timeDiff); + } + } + + public: + void add(PipelineEvent& event) { + using namespace std::chrono; + if(event.type == PipelineEvent::Type::INPUT && event.interval == PipelineEvent::Interval::END) { + if(event.queueSize.has_value()) { + if(inputQueueSizesBuffers.find(event.source) == inputQueueSizesBuffers.end()) { + inputQueueSizesBuffers[event.source] = std::make_unique>(windowSize); + } + inputQueueSizesBuffers[event.source]->add(*event.queueSize); + } else { + throw std::runtime_error(fmt::format("INPUT END event must have queue size set source: {}, node {}", event.source, event.nodeId)); + } + } + // Update states + switch(event.type) { + case PipelineEvent::Type::CUSTOM: + case PipelineEvent::Type::LOOP: + break; + case PipelineEvent::Type::INPUT: + if(event.queueSize.has_value()) state.inputStates[event.source].numQueued = *event.queueSize; + switch(event.interval) { + case PipelineEvent::Interval::START: + state.inputStates[event.source].state = NodeState::InputQueueState::State::WAITING; + break; + case PipelineEvent::Interval::END: + state.inputStates[event.source].state = NodeState::InputQueueState::State::IDLE; + break; + case PipelineEvent::Interval::NONE: + if(event.status == -1 || event.status == -2) state.inputStates[event.source].state = NodeState::InputQueueState::State::BLOCKED; + break; + } + break; + case PipelineEvent::Type::OUTPUT: + if(event.interval == PipelineEvent::Interval::START) + state.outputStates[event.source].state = NodeState::OutputQueueState::State::SENDING; + else if(event.interval == PipelineEvent::Interval::END) + state.outputStates[event.source].state = NodeState::OutputQueueState::State::IDLE; + break; + case PipelineEvent::Type::INPUT_BLOCK: + 
if(event.interval == PipelineEvent::Interval::START) + state.state = NodeState::State::GETTING_INPUTS; + else if(event.interval == PipelineEvent::Interval::END) + state.state = NodeState::State::PROCESSING; + break; + case PipelineEvent::Type::OUTPUT_BLOCK: + if(event.interval == PipelineEvent::Interval::START) + state.state = NodeState::State::SENDING_OUTPUTS; + else if(event.interval == PipelineEvent::Interval::END) + state.state = NodeState::State::PROCESSING; + break; + } + bool addedEvent = false; + if(event.interval == PipelineEvent::Interval::NONE && event.type != PipelineEvent::Type::INPUT && event.type != PipelineEvent::Type::OUTPUT) { + addedEvent = updatePingBuffers(event); + } else if(event.interval != PipelineEvent::Interval::NONE) { + addedEvent = updateIntervalBuffers(event); + } + if(addedEvent /* && ++count % eventBatchSize == 0 */) { // TODO + // By instance + switch(event.type) { + case PipelineEvent::Type::CUSTOM: + updateTimingStats(state.otherTimings[event.source].durationStats, *otherTimingsBuffers[event.source]); + updateFpsStats(state.otherTimings[event.source], *otherFpsBuffers[event.source]); + break; + case PipelineEvent::Type::LOOP: + updateTimingStats(state.mainLoopTiming.durationStats, *mainLoopTimingsBuffer); + state.mainLoopTiming.fps = 1e6f / (float)state.mainLoopTiming.durationStats.averageMicrosRecent; + break; + case PipelineEvent::Type::INPUT: + updateTimingStats(state.inputStates[event.source].timing.durationStats, *inputTimingsBuffers[event.source]); + updateFpsStats(state.inputStates[event.source].timing, *inputFpsBuffers[event.source]); + break; + case PipelineEvent::Type::OUTPUT: + updateTimingStats(state.outputStates[event.source].timing.durationStats, *outputTimingsBuffers[event.source]); + updateFpsStats(state.outputStates[event.source].timing, *outputFpsBuffers[event.source]); + break; + case PipelineEvent::Type::INPUT_BLOCK: + updateTimingStats(state.inputsGetTiming.durationStats, *inputsGetTimingsBuffer); + 
updateFpsStats(state.inputsGetTiming, *inputsGetFpsBuffer); + break; + case PipelineEvent::Type::OUTPUT_BLOCK: + updateTimingStats(state.outputsSendTiming.durationStats, *outputsSendTimingsBuffer); + updateFpsStats(state.outputsSendTiming, *outputsSendFpsBuffer); + break; + } + } + if(event.type == PipelineEvent::Type::INPUT && event.interval == PipelineEvent::Interval::END /* && ++count % eventBatchSize == 0 */) { // TODO + auto& qStats = state.inputStates[event.source].queueStats; + auto& qBuffer = *inputQueueSizesBuffers[event.source]; + qStats.maxQueued = std::max(qStats.maxQueued, *event.queueSize); + auto qBufferData = qBuffer.getBuffer(); + std::sort(qBufferData.begin(), qBufferData.end()); + qStats.minQueuedRecent = qBufferData.front(); + qStats.maxQueuedRecent = qBufferData.back(); + qStats.medianQueuedRecent = qBufferData[qBufferData.size() / 2]; + if(qBufferData.size() % 2 == 0) { + qStats.medianQueuedRecent = (qStats.medianQueuedRecent + qBufferData[qBufferData.size() / 2 - 1]) / 2; + } + } + } +}; + +class PipelineEventHandler { + std::unordered_map nodeStates; + Node::InputMap* inputs; + uint32_t aggregationWindowSize; + uint32_t eventBatchSize; + uint32_t eventWaitWindow; + + std::atomic running; + + std::thread thread; + + std::shared_ptr logger; + + std::shared_mutex mutex; + + public: + PipelineEventHandler( + Node::InputMap* inputs, uint32_t aggregationWindowSize, uint32_t eventBatchSize, uint32_t eventWaitWindow, std::shared_ptr logger) + : inputs(inputs), aggregationWindowSize(aggregationWindowSize), eventBatchSize(eventBatchSize), eventWaitWindow(eventWaitWindow), logger(logger) {} + void threadedRun() { + while(running) { + std::unordered_map> events; + bool gotEvents = false; + for(auto& [k, v] : *inputs) { + events[k.second] = v.tryGet(); + gotEvents = gotEvents || (events[k.second] != nullptr); + } + for(auto& [k, event] : events) { + if(event != nullptr) { + if(nodeStates.find(event->nodeId) == nodeStates.end()) { + 
nodeStates.insert_or_assign(event->nodeId, NodeEventAggregation(aggregationWindowSize, eventBatchSize, eventWaitWindow, logger)); + } + { + std::unique_lock lock(mutex); + nodeStates.at(event->nodeId).add(*event); + } + } + } + if(!gotEvents) { + std::this_thread::sleep_for(std::chrono::milliseconds(10)); + } + } + } + void run() { + running = true; + thread = std::thread(&PipelineEventHandler::threadedRun, this); + } + void stop() { + running = false; + if(thread.joinable()) thread.join(); + } + bool getState(std::shared_ptr outState, bool sendEvents) { + std::shared_lock lock(mutex); + bool updated = false; + for(auto& [nodeId, nodeState] : nodeStates) { + outState->nodeStates[nodeId] = nodeState.state; + if(sendEvents) outState->nodeStates[nodeId].events = nodeState.eventsBuffer.getBuffer(); + if(nodeState.count % eventBatchSize == 0) updated = true; + } + return updated; + } +}; + +void PipelineEventAggregation::setRunOnHost(bool runOnHost) { + runOnHostVar = runOnHost; +} + +/** + * Check if the node is set to run on host + */ +bool PipelineEventAggregation::runOnHost() const { + return runOnHostVar; +} + +void PipelineEventAggregation::run() { + auto& logger = pimpl->logger; + + PipelineEventHandler handler(&inputs, properties.aggregationWindowSize, properties.eventBatchSize, properties.eventWaitWindow, logger); + handler.run(); + + std::optional currentConfig; + uint32_t sequenceNum = 0; + while(mainLoop()) { + auto outState = std::make_shared(); + bool gotConfig = false; + if(!currentConfig.has_value() || (currentConfig.has_value() && !currentConfig->repeat) || request.has()) { + auto req = request.get(); + if(req != nullptr) { + currentConfig = *req; + gotConfig = true; + } + } + if(gotConfig || (currentConfig.has_value() && currentConfig->repeat)) { + bool sendEvents = false; + if(currentConfig.has_value()) { + for(const auto& nodeCfg : currentConfig->nodes) { + if(nodeCfg.events) { + sendEvents = true; + break; + } + } + } + bool updated = 
handler.getState(outState, sendEvents); + outState->sequenceNum = sequenceNum++; + outState->configSequenceNum = currentConfig.has_value() ? currentConfig->sequenceNum : 0; + outState->setTimestamp(std::chrono::steady_clock::now()); + outState->tsDevice = outState->ts; + + if(!currentConfig->nodes.empty()) { + for(auto it = outState->nodeStates.begin(); it != outState->nodeStates.end();) { + auto nodeConfig = std::find_if(currentConfig->nodes.begin(), currentConfig->nodes.end(), [&](const NodeEventAggregationConfig& cfg) { + return cfg.nodeId == it->first; + }); + if(nodeConfig == currentConfig->nodes.end()) { + it = outState->nodeStates.erase(it); + } else { + if(nodeConfig->inputs.has_value()) { + auto inputStates = it->second.inputStates; + it->second.inputStates.clear(); + for(const auto& inputName : *nodeConfig->inputs) { + if(inputStates.find(inputName) != inputStates.end()) { + it->second.inputStates[inputName] = inputStates[inputName]; + } + } + } + if(nodeConfig->outputs.has_value()) { + auto outputStates = it->second.outputStates; + it->second.outputStates.clear(); + for(const auto& outputName : *nodeConfig->outputs) { + if(outputStates.find(outputName) != outputStates.end()) { + it->second.outputStates[outputName] = outputStates[outputName]; + } + } + } + if(nodeConfig->others.has_value()) { + auto otherTimings = it->second.otherTimings; + it->second.otherTimings.clear(); + for(const auto& otherName : *nodeConfig->others) { + if(otherTimings.find(otherName) != otherTimings.end()) { + it->second.otherTimings[otherName] = otherTimings[otherName]; + } + } + } + ++it; + } + } + } + if(gotConfig || (currentConfig.has_value() && currentConfig->repeat && updated)) out.send(outState); + } + std::this_thread::sleep_for(std::chrono::milliseconds(10)); + } + handler.stop(); +} + +} // namespace internal +} // namespace node +} // namespace dai diff --git a/src/pipeline/node/internal/PipelineStateMerge.cpp b/src/pipeline/node/internal/PipelineStateMerge.cpp new file 
mode 100644 index 0000000000..7e65ccd882 --- /dev/null +++ b/src/pipeline/node/internal/PipelineStateMerge.cpp @@ -0,0 +1,79 @@ +#include "depthai/pipeline/node/internal/PipelineStateMerge.hpp" + +#include "depthai/pipeline/datatype/PipelineEventAggregationConfig.hpp" +#include "depthai/pipeline/datatype/PipelineState.hpp" +#include "pipeline/ThreadedNodeImpl.hpp" + +namespace dai { +namespace node { + +std::shared_ptr PipelineStateMerge::build(bool hasDeviceNodes, bool hasHostNodes) { + this->hasDeviceNodes = hasDeviceNodes; + this->hasHostNodes = hasHostNodes; + return std::static_pointer_cast(shared_from_this()); +} + +void mergeStates(std::shared_ptr& outState, const std::shared_ptr& inState) { + for(const auto& [key, value] : inState->nodeStates) { + if(outState->nodeStates.find(key) != outState->nodeStates.end()) { + throw std::runtime_error("PipelineStateMerge: Duplicate node state for nodeId " + std::to_string(key)); + } else + outState->nodeStates[key] = value; + } +} +void PipelineStateMerge::run() { + auto& logger = pimpl->logger; + if(!hasDeviceNodes && !hasHostNodes) { + logger->warn("PipelineStateMerge: both device and host nodes are disabled. 
Have you built the node?"); + } + std::optional currentConfig; + uint32_t sequenceNum = 0; + uint32_t configSequenceNum = 0; + while(mainLoop()) { + auto outState = std::make_shared(); + bool waitForMatch = false; + if(!currentConfig.has_value() || (currentConfig.has_value() && !currentConfig->repeat) || request.has()) { + auto req = request.get(); + if(req != nullptr) { + currentConfig = *req; + currentConfig->setSequenceNum(++configSequenceNum); + outRequest.send(std::make_shared(currentConfig.value())); + waitForMatch = true; + } + } + if(hasDeviceNodes) { + auto deviceState = inputDevice.get(); + if(waitForMatch && deviceState != nullptr && currentConfig.has_value()) { + while(isRunning() && deviceState->configSequenceNum != currentConfig->sequenceNum) { + deviceState = inputDevice.get(); + } + } + if(deviceState != nullptr) { + *outState = *deviceState; + } + } + if(hasHostNodes) { + auto hostState = inputHost.get(); + if(waitForMatch && hostState != nullptr && currentConfig.has_value()) { + while(isRunning() && hostState->configSequenceNum != currentConfig->sequenceNum) { + hostState = inputHost.get(); + if(!isRunning()) break; + } + } + if(hostState != nullptr) { + if(hasDeviceNodes) { + // merge + mergeStates(outState, hostState); + auto minTimestamp = std::min(outState->getTimestamp(), hostState->getTimestamp()); + outState->setTimestamp(minTimestamp); + outState->sequenceNum = sequenceNum++; + } else { + *outState = *hostState; + } + } + } + out.send(outState); + } +} +} // namespace node +} // namespace dai diff --git a/src/pipeline/node/internal/XLinkOutHost.cpp b/src/pipeline/node/internal/XLinkOutHost.cpp index c7890fbced..de5d782d40 100644 --- a/src/pipeline/node/internal/XLinkOutHost.cpp +++ b/src/pipeline/node/internal/XLinkOutHost.cpp @@ -53,7 +53,7 @@ void XLinkOutHost::run() { stream = XLinkStream(this->conn, this->streamName, maxSize); currentMaxSize = maxSize; }; - while(isRunning()) { + while(mainLoop()) { try { auto outgoing = in.get(); auto 
metadata = StreamMessageParser::serializeMetadata(outgoing); diff --git a/src/pipeline/node/test/MyProducer.cpp b/src/pipeline/node/test/MyProducer.cpp index 260d6807fc..611cc14ad4 100644 --- a/src/pipeline/node/test/MyProducer.cpp +++ b/src/pipeline/node/test/MyProducer.cpp @@ -12,7 +12,7 @@ namespace test { void MyProducer::run() { std::cout << "Hello, I just woke up and I'm ready to do some work!\n"; - while(isRunning()) { + while(mainLoop()) { std::this_thread::sleep_for(std::chrono::milliseconds(1500)); auto buf = std::make_shared(); diff --git a/src/rtabmap/RTABMapSLAM.cpp b/src/rtabmap/RTABMapSLAM.cpp index 65243135b2..d7a5ed0d87 100644 --- a/src/rtabmap/RTABMapSLAM.cpp +++ b/src/rtabmap/RTABMapSLAM.cpp @@ -115,7 +115,7 @@ void RTABMapSLAM::odomPoseCB(std::shared_ptr data) { void RTABMapSLAM::run() { auto& logger = pimpl->logger; - while(isRunning()) { + while(mainLoop()) { if(!initialized) { continue; } else { diff --git a/src/rtabmap/RTABMapVIO.cpp b/src/rtabmap/RTABMapVIO.cpp index da82d09248..55f1ddae5e 100644 --- a/src/rtabmap/RTABMapVIO.cpp +++ b/src/rtabmap/RTABMapVIO.cpp @@ -129,7 +129,7 @@ void RTABMapVIO::syncCB(std::shared_ptr data) { } void RTABMapVIO::run() { - while(isRunning()) { + while(mainLoop()) { auto msg = inSync.get(); if(msg != nullptr) { syncCB(msg); diff --git a/src/utility/PipelineEventDispatcher.cpp b/src/utility/PipelineEventDispatcher.cpp new file mode 100644 index 0000000000..e0a9e79bfd --- /dev/null +++ b/src/utility/PipelineEventDispatcher.cpp @@ -0,0 +1,206 @@ +#include "depthai/utility/PipelineEventDispatcher.hpp" + +#include + +namespace dai { +namespace utility { + +std::string typeToString(PipelineEvent::Type type) { + switch(type) { + case PipelineEvent::Type::CUSTOM: + return "CUSTOM"; + case PipelineEvent::Type::LOOP: + return "LOOP"; + case PipelineEvent::Type::INPUT: + return "INPUT"; + case PipelineEvent::Type::OUTPUT: + return "OUTPUT"; + case PipelineEvent::Type::INPUT_BLOCK: + return "INPUT_BLOCK"; + case 
PipelineEvent::Type::OUTPUT_BLOCK: + return "OUTPUT_BLOCK"; + default: + return "UNKNOWN"; + } +} +std::string makeKey(PipelineEvent::Type type, const std::string& source) { + return typeToString(type) + "#" + source; +} + +bool blacklist(PipelineEvent::Type type, const std::string& source) { + if(type == PipelineEvent::Type::OUTPUT && source == "pipelineEventOutput") return true; + return false; +} + +void PipelineEventDispatcher::checkNodeId() { + if(nodeId == -1) { + throw std::runtime_error("Node ID not set on PipelineEventDispatcher"); + } +} +void PipelineEventDispatcher::setNodeId(int64_t id) { + nodeId = id; +} +void PipelineEventDispatcher::startEvent(PipelineEvent::Type type, const std::string& source, std::optional queueSize) { + if(!sendEvents) return; + checkNodeId(); + if(blacklist(type, source)) return; + + std::lock_guard lock(mutex); + + auto& event = events[makeKey(type, source)]; + event.event.setTimestamp(std::chrono::steady_clock::now()); + event.event.tsDevice = event.event.ts; + ++event.event.sequenceNum; + event.event.nodeId = nodeId; + event.event.queueSize = std::move(queueSize); + event.event.interval = PipelineEvent::Interval::START; + event.event.type = type; + event.event.source = source; + event.ongoing = true; + + if(out) { + out->send(std::make_shared(event.event)); + } +} +void PipelineEventDispatcher::startInputEvent(const std::string& source, std::optional queueSize) { + startEvent(PipelineEvent::Type::INPUT, source, std::move(queueSize)); +} +void PipelineEventDispatcher::startOutputEvent(const std::string& source) { + startEvent(PipelineEvent::Type::OUTPUT, source, std::nullopt); +} +void PipelineEventDispatcher::startCustomEvent(const std::string& source) { + startEvent(PipelineEvent::Type::CUSTOM, source, std::nullopt); +} +void PipelineEventDispatcher::endEvent(PipelineEvent::Type type, const std::string& source, std::optional queueSize) { + if(!sendEvents) return; + checkNodeId(); + if(blacklist(type, source)) return; + + 
std::lock_guard lock(mutex); + + auto now = std::chrono::steady_clock::now(); + + auto& event = events[makeKey(type, source)]; + if(!event.ongoing) { + throw std::runtime_error("Event with name " + source + " has not been started"); + } + + event.event.setTimestamp(now); + event.event.tsDevice = event.event.ts; + event.event.nodeId = nodeId; + event.event.queueSize = std::move(queueSize); + event.event.interval = PipelineEvent::Interval::END; + event.event.type = type; + event.event.source = source; + event.ongoing = false; + + if(out) { + out->send(std::make_shared(event.event)); + } + + event.event.queueSize = std::nullopt; +} +void PipelineEventDispatcher::endInputEvent(const std::string& source, std::optional queueSize) { + endEvent(PipelineEvent::Type::INPUT, source, std::move(queueSize)); +} +void PipelineEventDispatcher::endOutputEvent(const std::string& source) { + endEvent(PipelineEvent::Type::OUTPUT, source, std::nullopt); +} +void PipelineEventDispatcher::endCustomEvent(const std::string& source) { + endEvent(PipelineEvent::Type::CUSTOM, source, std::nullopt); +} +void PipelineEventDispatcher::pingEvent(PipelineEvent::Type type, const std::string& source) { + if(!sendEvents) return; + checkNodeId(); + if(blacklist(type, source)) return; + + std::lock_guard lock(mutex); + + auto now = std::chrono::steady_clock::now(); + + auto& event = events[makeKey(type, source)]; + if(event.ongoing) { + throw std::runtime_error("Event with name " + source + " is already ongoing"); + } + event.event.setTimestamp(now); + event.event.tsDevice = event.event.ts; + ++event.event.sequenceNum; + event.event.nodeId = nodeId; + event.event.interval = PipelineEvent::Interval::NONE; + event.event.type = type; + event.event.source = source; + + if(out) { + out->send(std::make_shared(event.event)); + } +} +void PipelineEventDispatcher::pingTrackedEvent(PipelineEvent::Type type, const std::string& source, int64_t sequenceNum) { + if(!sendEvents) return; + checkNodeId(); + 
if(blacklist(type, source)) return; + + std::lock_guard lock(mutex); + + auto now = std::chrono::steady_clock::now(); + + auto event = std::make_shared(); + event->setTimestamp(now); + event->tsDevice = event->ts; + event->sequenceNum = sequenceNum; + event->nodeId = nodeId; + event->interval = PipelineEvent::Interval::NONE; + event->type = type; + event->source = source; + + if(out) { + out->send(event); + } +} +void PipelineEventDispatcher::pingMainLoopEvent() { + pingEvent(PipelineEvent::Type::LOOP, "_mainLoop"); +} +void PipelineEventDispatcher::pingCustomEvent(const std::string& source) { + pingEvent(PipelineEvent::Type::CUSTOM, source); +} +void PipelineEventDispatcher::pingInputEvent(const std::string& source, int32_t status, std::optional queueSize) { + if(!sendEvents) return; + checkNodeId(); + if(blacklist(PipelineEvent::Type::INPUT, source)) return; + + std::lock_guard lock(mutex); + + auto now = std::chrono::steady_clock::now(); + + auto& event = events[makeKey(PipelineEvent::Type::INPUT, source)]; + PipelineEvent eventCopy = event.event; + eventCopy.setTimestamp(now); + eventCopy.tsDevice = eventCopy.ts; + eventCopy.nodeId = nodeId; + eventCopy.status = std::move(status); + eventCopy.queueSize = std::move(queueSize); + eventCopy.interval = PipelineEvent::Interval::NONE; + eventCopy.type = PipelineEvent::Type::INPUT; + eventCopy.source = source; + + if(out) { + out->send(std::make_shared(eventCopy)); + } +} +PipelineEventDispatcher::BlockPipelineEvent PipelineEventDispatcher::blockEvent(PipelineEvent::Type type, const std::string& source) { + return BlockPipelineEvent(*this, type, source); +} +PipelineEventDispatcher::BlockPipelineEvent PipelineEventDispatcher::inputBlockEvent(const std::string& source) { + // For convenience due to the default source + return blockEvent(PipelineEvent::Type::INPUT_BLOCK, source); +} +PipelineEventDispatcher::BlockPipelineEvent PipelineEventDispatcher::outputBlockEvent(const std::string& source) { + // For convenience 
due to the default source + return blockEvent(PipelineEvent::Type::OUTPUT_BLOCK, source); +} +PipelineEventDispatcher::BlockPipelineEvent PipelineEventDispatcher::customBlockEvent(const std::string& source) { + // For convenience due to the default source + return blockEvent(PipelineEvent::Type::CUSTOM, source); +} + +} // namespace utility +} // namespace dai diff --git a/src/utility/ProtoSerialize.cpp b/src/utility/ProtoSerialize.cpp index 300600579b..5894fcbb7b 100644 --- a/src/utility/ProtoSerialize.cpp +++ b/src/utility/ProtoSerialize.cpp @@ -170,6 +170,9 @@ bool deserializationSupported(DatatypeEnum datatype) { case DatatypeEnum::DynamicCalibrationResult: case DatatypeEnum::CalibrationQuality: case DatatypeEnum::CoverageData: + case DatatypeEnum::PipelineEvent: + case DatatypeEnum::PipelineState: + case DatatypeEnum::PipelineEventAggregationConfig: return false; } return false; diff --git a/tests/src/ondevice_tests/input_output_naming_test.cpp b/tests/src/ondevice_tests/input_output_naming_test.cpp index b246788b22..d845ced6eb 100644 --- a/tests/src/ondevice_tests/input_output_naming_test.cpp +++ b/tests/src/ondevice_tests/input_output_naming_test.cpp @@ -8,7 +8,7 @@ class TestNode : public dai::node::CustomThreadedNode { public: void run() override { - while(isRunning()) { + while(mainLoop()) { // do nothing } }