From 5110bcc29451ab2c733ea5627aa319ca2d4b69f5 Mon Sep 17 00:00:00 2001 From: asahtik Date: Tue, 16 Sep 2025 15:46:16 +0200 Subject: [PATCH 01/40] Added pipeline events messages and node --- CMakeLists.txt | 3 ++ .../pipeline/datatype/DatatypeEnum.hpp | 2 + .../pipeline/datatype/PipelineEvent.hpp | 41 +++++++++++++++ .../pipeline/datatype/PipelineState.hpp | 42 +++++++++++++++ .../node/PipelineEventAggregation.hpp | 52 +++++++++++++++++++ .../PipelineEventAggregationProperties.hpp | 16 ++++++ src/pipeline/datatype/PipelineEvent.cpp | 4 ++ src/pipeline/datatype/PipelineState.cpp | 4 ++ src/pipeline/datatype/StreamMessageParser.cpp | 8 +++ .../node/PipelineEventAggregation.cpp | 23 ++++++++ 10 files changed, 195 insertions(+) create mode 100644 include/depthai/pipeline/datatype/PipelineEvent.hpp create mode 100644 include/depthai/pipeline/datatype/PipelineState.hpp create mode 100644 include/depthai/pipeline/node/PipelineEventAggregation.hpp create mode 100644 include/depthai/properties/PipelineEventAggregationProperties.hpp create mode 100644 src/pipeline/datatype/PipelineEvent.cpp create mode 100644 src/pipeline/datatype/PipelineState.cpp create mode 100644 src/pipeline/node/PipelineEventAggregation.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index d40d6ac32..4b5f62af0 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -318,6 +318,7 @@ set(TARGET_CORE_SOURCES src/pipeline/node/host/RGBD.cpp src/pipeline/datatype/DatatypeEnum.cpp src/pipeline/node/PointCloud.cpp + src/pipeline/node/PipelineEventAggregation.cpp src/pipeline/datatype/Buffer.cpp src/pipeline/datatype/ImgFrame.cpp src/pipeline/datatype/ImgTransformations.cpp @@ -347,6 +348,8 @@ set(TARGET_CORE_SOURCES src/pipeline/datatype/PointCloudConfig.cpp src/pipeline/datatype/ObjectTrackerConfig.cpp src/pipeline/datatype/PointCloudData.cpp + src/pipeline/datatype/PipelineEvent.cpp + src/pipeline/datatype/PipelineState.cpp src/pipeline/datatype/RGBDData.cpp src/pipeline/datatype/MessageGroup.cpp src/pipeline/datatype/TransformData.cpp diff --git a/include/depthai/pipeline/datatype/DatatypeEnum.hpp b/include/depthai/pipeline/datatype/DatatypeEnum.hpp index d8d594263..a501ad7f4 100644 --- a/include/depthai/pipeline/datatype/DatatypeEnum.hpp +++ b/include/depthai/pipeline/datatype/DatatypeEnum.hpp @@ -43,6 +43,8 @@ enum class DatatypeEnum : std::int32_t { DynamicCalibrationResult, CalibrationQuality, CoverageData, + PipelineEvent, + PipelineState, }; bool isDatatypeSubclassOf(DatatypeEnum parent, DatatypeEnum children); diff --git a/include/depthai/pipeline/datatype/PipelineEvent.hpp b/include/depthai/pipeline/datatype/PipelineEvent.hpp new file mode 100644 index 000000000..a8c7af539 --- /dev/null +++ b/include/depthai/pipeline/datatype/PipelineEvent.hpp @@ -0,0 +1,41 @@ +#pragma once + +#include + +#include "depthai/common/Timestamp.hpp" +#include "depthai/common/optional.hpp" +#include "depthai/common/variant.hpp" +#include "depthai/pipeline/datatype/Buffer.hpp" + +namespace dai { + +/** + * Pipeline event message. 
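+ * Carries the timing of one named span inside a node: the source name, an event type (CUSTOM, LOOP, INPUT, OUTPUT or FUNC_CALL), the measured duration in microseconds and optional metadata.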
+ */ +class PipelineEvent : public Buffer { + public: + enum class EventType : std::int32_t { + CUSTOM = 0, + LOOP = 1, + INPUT = 2, + OUTPUT = 3, + FUNC_CALL = 4 + }; + + PipelineEvent() = default; + virtual ~PipelineEvent() = default; + + std::optional metadata; + uint64_t duration {0}; // Duration in microseconds + EventType type = EventType::CUSTOM; + std::variant> source; + + void serialize(std::vector& metadata, DatatypeEnum& datatype) const override { + metadata = utility::serialize(*this); + datatype = DatatypeEnum::PipelineEvent; + }; + + DEPTHAI_SERIALIZE(PipelineEvent, Buffer::ts, Buffer::tsDevice, Buffer::sequenceNum, metadata, duration, type, source); +}; + +} // namespace dai diff --git a/include/depthai/pipeline/datatype/PipelineState.hpp b/include/depthai/pipeline/datatype/PipelineState.hpp new file mode 100644 index 000000000..465ee6beb --- /dev/null +++ b/include/depthai/pipeline/datatype/PipelineState.hpp @@ -0,0 +1,42 @@ +#pragma once + +#include + +#include "depthai/pipeline/datatype/Buffer.hpp" +#include "depthai/pipeline/datatype/PipelineEvent.hpp" + +namespace dai { + +class NodeState { + public: + struct Timing { + uint64_t averageMicros; + uint64_t stdDevMicros; + DEPTHAI_SERIALIZE(Timing, averageMicros, stdDevMicros); + }; + std::vector events; + std::unordered_map timingsByType; + std::unordered_map timingsByInstance; + + DEPTHAI_SERIALIZE(NodeState, events, timingsByType, timingsByInstance); +}; + +/** + * Pipeline event message. + */ +class PipelineState : public Buffer { + public: + PipelineState() = default; + virtual ~PipelineState() = default; + + std::unordered_map nodeStates; + + void serialize(std::vector& metadata, DatatypeEnum& datatype) const override { + metadata = utility::serialize(*this); + datatype = DatatypeEnum::PipelineState; + }; + + DEPTHAI_SERIALIZE(PipelineState, Buffer::ts, Buffer::tsDevice, Buffer::sequenceNum, nodeStates); +}; + +} // namespace dai diff --git a/include/depthai/pipeline/node/PipelineEventAggregation.hpp b/include/depthai/pipeline/node/PipelineEventAggregation.hpp new file mode 100644 index 000000000..f2636a4a9 --- /dev/null +++ b/include/depthai/pipeline/node/PipelineEventAggregation.hpp @@ -0,0 +1,52 @@ +#pragma once + +#include + +// shared +#include + +namespace dai { +namespace node { + +/** + * @brief Sync node. Performs syncing between image frames + */ +class PipelineEventAggregation : public DeviceNodeCRTP, public HostRunnable { + private: + bool runOnHostVar = false; + + public: + constexpr static const char* NAME = "PipelineEventAggregation"; + using DeviceNodeCRTP::DeviceNodeCRTP; + + /** + * A map of inputs + */ + InputMap inputs{*this, "inputs", {"", DEFAULT_GROUP, false, 10, {{{DatatypeEnum::PipelineEvent, false}}}, DEFAULT_WAIT_FOR_MESSAGE}}; + + /** + * A map of outputs + */ + OutputMap outputs{*this, "outputs", {DEFAULT_NAME, DEFAULT_GROUP, {{{DatatypeEnum::PipelineEvent, false}}}}}; + + /** + * Output message of type TODO + */ + Output out{*this, {"out", DEFAULT_GROUP, {{{DatatypeEnum::Buffer, false}}}}}; + + /** + * Specify whether to run on host or device + * By default, the node will run on device. 
+ */ + void setRunOnHost(bool runOnHost); + + /** + * Check if the node is set to run on host + */ + bool runOnHost() const override; + + void run() override; +}; + +} // namespace node +} // namespace dai diff --git a/include/depthai/properties/PipelineEventAggregationProperties.hpp b/include/depthai/properties/PipelineEventAggregationProperties.hpp new file mode 100644 index 000000000..654e10950 --- /dev/null +++ b/include/depthai/properties/PipelineEventAggregationProperties.hpp @@ -0,0 +1,16 @@ +#pragma once + +#include "depthai/properties/Properties.hpp" + +namespace dai { + +/** + * Specify properties for Sync. + */ +struct PipelineEventAggregationProperties : PropertiesSerializable { + int dummy = 0; +}; + +DEPTHAI_SERIALIZE_EXT(PipelineEventAggregationProperties, dummy); + +} // namespace dai diff --git a/src/pipeline/datatype/PipelineEvent.cpp b/src/pipeline/datatype/PipelineEvent.cpp new file mode 100644 index 000000000..4d7121336 --- /dev/null +++ b/src/pipeline/datatype/PipelineEvent.cpp @@ -0,0 +1,4 @@ +#include "depthai/pipeline/datatype/PipelineEvent.hpp" + +namespace dai { +} // namespace dai diff --git a/src/pipeline/datatype/PipelineState.cpp b/src/pipeline/datatype/PipelineState.cpp new file mode 100644 index 000000000..f45bb47e9 --- /dev/null +++ b/src/pipeline/datatype/PipelineState.cpp @@ -0,0 +1,4 @@ +#include "depthai/pipeline/datatype/PipelineState.hpp" + +namespace dai { +} // namespace dai diff --git a/src/pipeline/datatype/StreamMessageParser.cpp b/src/pipeline/datatype/StreamMessageParser.cpp index 660afa832..7ec39151d 100644 --- a/src/pipeline/datatype/StreamMessageParser.cpp +++ b/src/pipeline/datatype/StreamMessageParser.cpp @@ -15,6 +15,8 @@ #include "depthai/pipeline/datatype/BenchmarkReport.hpp" #include "depthai/pipeline/datatype/Buffer.hpp" #include "depthai/pipeline/datatype/CameraControl.hpp" +#include "depthai/pipeline/datatype/PipelineEvent.hpp" +#include "depthai/pipeline/datatype/PipelineState.hpp" #ifdef DEPTHAI_HAVE_DYNAMIC_CALIBRATION_SUPPORT #include "depthai/pipeline/datatype/DynamicCalibrationControl.hpp" #include "depthai/pipeline/datatype/DynamicCalibrationResults.hpp" @@ -238,6 +240,12 @@ std::shared_ptr StreamMessageParser::parseMessage(streamPacketDesc_t* case DatatypeEnum::PointCloudData: return parseDatatype(metadataStart, serializedObjectSize, data, fd); break; + case DatatypeEnum::PipelineEvent: + return parseDatatype(metadataStart, serializedObjectSize, data, fd); + break; + case DatatypeEnum::PipelineState: + return parseDatatype(metadataStart, serializedObjectSize, data, fd); + break; case DatatypeEnum::MessageGroup: return parseDatatype(metadataStart, serializedObjectSize, data, fd); break; diff --git a/src/pipeline/node/PipelineEventAggregation.cpp b/src/pipeline/node/PipelineEventAggregation.cpp new file mode 100644 index 000000000..5cf3b48a3 --- /dev/null +++ b/src/pipeline/node/PipelineEventAggregation.cpp @@ -0,0 +1,23 @@ +#include "depthai/pipeline/node/PipelineEventAggregation.hpp" + +namespace dai { +namespace node { + +void PipelineEventAggregation::setRunOnHost(bool runOnHost) { + runOnHostVar = runOnHost; +} + +/** + * Check if the node is set to run on host + */ +bool PipelineEventAggregation::runOnHost() const { + return runOnHostVar; +} + +void PipelineEventAggregation::run() { + while(isRunning()) { + + } +} +} // namespace node +} // namespace dai From e165806fe912218241b7f54f37c9657d69eca0ce Mon Sep 17 00:00:00 2001 From: asahtik Date: Thu, 18 Sep 2025 13:48:54 +0200 Subject: [PATCH 02/40] Added the pipeline 
event dispatcher --- CMakeLists.txt | 1 + .../pipeline/datatype/PipelineEvent.hpp | 7 +- .../pipeline/datatype/PipelineState.hpp | 3 +- .../PipelineEventAggregationProperties.hpp | 6 +- include/depthai/utility/CircularBuffer.hpp | 53 +++++++++++ .../utility/PipelineEventDispatcher.hpp | 39 ++++++++ .../node/PipelineEventAggregation.cpp | 95 ++++++++++++++++++- src/utility/PipelineEventDispatcher.cpp | 78 +++++++++++++++ 8 files changed, 274 insertions(+), 8 deletions(-) create mode 100644 include/depthai/utility/CircularBuffer.hpp create mode 100644 include/depthai/utility/PipelineEventDispatcher.hpp create mode 100644 src/utility/PipelineEventDispatcher.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index 4b5f62af0..bd1ac83ab 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -359,6 +359,7 @@ set(TARGET_CORE_SOURCES src/utility/Initialization.cpp src/utility/Resources.cpp src/utility/Platform.cpp + src/utility/PipelineEventDispatcher.cpp src/utility/RecordReplay.cpp src/utility/McapImpl.cpp src/utility/Environment.cpp diff --git a/include/depthai/pipeline/datatype/PipelineEvent.hpp b/include/depthai/pipeline/datatype/PipelineEvent.hpp index a8c7af539..9ea1b92f2 100644 --- a/include/depthai/pipeline/datatype/PipelineEvent.hpp +++ b/include/depthai/pipeline/datatype/PipelineEvent.hpp @@ -2,9 +2,7 @@ #include -#include "depthai/common/Timestamp.hpp" #include "depthai/common/optional.hpp" -#include "depthai/common/variant.hpp" #include "depthai/pipeline/datatype/Buffer.hpp" namespace dai { @@ -25,17 +23,18 @@ class PipelineEvent : public Buffer { PipelineEvent() = default; virtual ~PipelineEvent() = default; + int64_t nodeId = -1; std::optional metadata; uint64_t duration {0}; // Duration in microseconds EventType type = EventType::CUSTOM; - std::variant> source; + std::string source; void serialize(std::vector& metadata, DatatypeEnum& datatype) const override { metadata = utility::serialize(*this); datatype = DatatypeEnum::PipelineEvent; }; - DEPTHAI_SERIALIZE(PipelineEvent, Buffer::ts, Buffer::tsDevice, Buffer::sequenceNum, metadata, duration, type, source); + DEPTHAI_SERIALIZE(PipelineEvent, Buffer::ts, Buffer::tsDevice, Buffer::sequenceNum, nodeId, metadata, duration, type, source); }; } // namespace dai diff --git a/include/depthai/pipeline/datatype/PipelineState.hpp b/include/depthai/pipeline/datatype/PipelineState.hpp index 465ee6beb..64ddbbf34 100644 --- a/include/depthai/pipeline/datatype/PipelineState.hpp +++ b/include/depthai/pipeline/datatype/PipelineState.hpp @@ -4,6 +4,7 @@ #include "depthai/pipeline/datatype/Buffer.hpp" #include "depthai/pipeline/datatype/PipelineEvent.hpp" +#include "depthai/common/optional.hpp" namespace dai { @@ -29,7 +30,7 @@ class PipelineState : public Buffer { PipelineState() = default; virtual ~PipelineState() = default; - std::unordered_map nodeStates; + std::unordered_map> nodeStates; void serialize(std::vector& metadata, DatatypeEnum& datatype) const override { metadata = utility::serialize(*this); diff --git a/include/depthai/properties/PipelineEventAggregationProperties.hpp b/include/depthai/properties/PipelineEventAggregationProperties.hpp index 654e10950..028f0bf09 100644 --- a/include/depthai/properties/PipelineEventAggregationProperties.hpp +++ b/include/depthai/properties/PipelineEventAggregationProperties.hpp @@ -8,9 +8,11 @@ namespace dai { * Specify properties for Sync. 
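 * Window size for the rolling timing statistics, number of events that make up one reported batch, and whether the raw events are forwarded with each report.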
*/ struct PipelineEventAggregationProperties : PropertiesSerializable { - int dummy = 0; + uint32_t aggregationWindowSize = 20; + uint32_t eventBatchSize = 10; + bool sendEvents = false; }; -DEPTHAI_SERIALIZE_EXT(PipelineEventAggregationProperties, dummy); +DEPTHAI_SERIALIZE_EXT(PipelineEventAggregationProperties, aggregationWindowSize, eventBatchSize, sendEvents); } // namespace dai diff --git a/include/depthai/utility/CircularBuffer.hpp b/include/depthai/utility/CircularBuffer.hpp new file mode 100644 index 000000000..a0f631d60 --- /dev/null +++ b/include/depthai/utility/CircularBuffer.hpp @@ -0,0 +1,53 @@ +#pragma once +#include +#include + +namespace dai { +namespace utility { + +template +class CircularBuffer { + public: + CircularBuffer(size_t size) : maxSize(size) { + buffer.reserve(size); + } + void add(int value) { + if(buffer.size() < maxSize) { + buffer.push_back(value); + } else { + buffer[index] = value; + index = (index + 1) % maxSize; + } + } + std::vector getBuffer() const { + std::vector result; + if(buffer.size() < maxSize) { + result = buffer; + } else { + result.insert(result.end(), buffer.begin() + index, buffer.end()); + result.insert(result.end(), buffer.begin(), buffer.begin() + index); + } + return result; + } + T last() const { + if(buffer.empty()) { + throw std::runtime_error("CircularBuffer is empty"); + } + if(buffer.size() < maxSize) { + return buffer.back(); + } else { + return buffer[(index + maxSize - 1) % maxSize]; + } + } + size_t size() const { + return buffer.size(); + } + + private: + std::vector buffer; + size_t maxSize; + size_t index = 0; +}; + +} // namespace utility +} // namespace dai diff --git a/include/depthai/utility/PipelineEventDispatcher.hpp b/include/depthai/utility/PipelineEventDispatcher.hpp new file mode 100644 index 000000000..4dfb006a3 --- /dev/null +++ b/include/depthai/utility/PipelineEventDispatcher.hpp @@ -0,0 +1,39 @@ +#pragma once + +#include +#include +#include +#include +#include + +#include "depthai/pipeline/Node.hpp" +#include "depthai/pipeline/datatype/PipelineEvent.hpp" + +namespace dai { +namespace utility { + +class PipelineEventDispatcher { + struct EventStatus { + PipelineEvent::EventType type; + std::chrono::microseconds duration; + std::chrono::time_point timestamp; + bool ongoing; + std::optional metadata; + }; + + int64_t nodeId; + std::unordered_map events; + Node::Output* out = nullptr; + + public: + PipelineEventDispatcher(int64_t nodeId, Node::Output* output) : nodeId(nodeId), out(output) {} + + void addEvent(const std::string& source, PipelineEvent::EventType type); + + void startEvent(const std::string& source, std::optional metadata = std::nullopt); // Start event with a start and an end + void endEvent(const std::string& source); // Stop event with a start and an end + void pingEvent(const std::string& source); // Event where stop and start are the same (eg. 
loop) +}; + +} // namespace utility +} // namespace dai diff --git a/src/pipeline/node/PipelineEventAggregation.cpp b/src/pipeline/node/PipelineEventAggregation.cpp index 5cf3b48a3..bb4be94b9 100644 --- a/src/pipeline/node/PipelineEventAggregation.cpp +++ b/src/pipeline/node/PipelineEventAggregation.cpp @@ -1,8 +1,75 @@ #include "depthai/pipeline/node/PipelineEventAggregation.hpp" +#include "depthai/pipeline/datatype/PipelineEvent.hpp" +#include "depthai/pipeline/datatype/PipelineState.hpp" +#include "depthai/utility/CircularBuffer.hpp" + namespace dai { namespace node { +class NodeEventAggregation { + int windowSize; + + public: + NodeEventAggregation(int windowSize) : windowSize(windowSize) {} + NodeState state; + std::unordered_map>> timingsBufferByType; + std::unordered_map>> timingsBufferByInstance; + + void add(PipelineEvent& event) { + // TODO optimize avg + state.events.push_back(event); + if(timingsBufferByType.find(event.type) == timingsBufferByType.end()) { + timingsBufferByType[event.type] = std::make_unique>(windowSize); + } + if(timingsBufferByInstance.find(event.source) == timingsBufferByInstance.end()) { + timingsBufferByInstance[event.source] = std::make_unique>(windowSize); + } + timingsBufferByType[event.type]->add(event.duration); + timingsBufferByInstance[event.source]->add(event.duration); + // Calculate average duration and standard deviation from buffers + state.timingsByType[event.type].averageMicros = 0; + state.timingsByType[event.type].stdDevMicros = 0; + state.timingsByInstance[event.source].averageMicros = 0; + state.timingsByInstance[event.source].stdDevMicros = 0; + auto bufferByType = timingsBufferByType[event.type]->getBuffer(); + auto bufferByInstance = timingsBufferByInstance[event.source]->getBuffer(); + if(!bufferByType.empty()) { + uint64_t sum = 0; + double variance = 0; + for(auto v : bufferByType) { + sum += v; + } + state.timingsByType[event.type].averageMicros = sum / bufferByType.size(); + // Calculate standard deviation + for(auto v : bufferByType) { + auto diff = v - state.timingsByType[event.type].averageMicros; + variance += diff * diff; + } + variance /= bufferByType.size(); + state.timingsByType[event.type].stdDevMicros = (uint64_t)(std::sqrt(variance)); + } + if(!bufferByInstance.empty()) { + uint64_t sum = 0; + double variance = 0; + for(auto v : bufferByInstance) { + sum += v; + } + state.timingsByInstance[event.source].averageMicros = sum / bufferByInstance.size(); + // Calculate standard deviation + for(auto v : bufferByInstance) { + auto diff = v - state.timingsByInstance[event.source].averageMicros; + variance += diff * diff; + } + variance /= bufferByInstance.size(); + state.timingsByInstance[event.source].stdDevMicros = (uint64_t)(std::sqrt(variance)); + } + } + void resetEvents() { + state.events.clear(); + } +}; + void PipelineEventAggregation::setRunOnHost(bool runOnHost) { runOnHostVar = runOnHost; } @@ -15,8 +82,34 @@ bool PipelineEventAggregation::runOnHost() const { } void PipelineEventAggregation::run() { + std::unordered_map nodeStates; while(isRunning()) { - + std::unordered_map> events; + for(auto& [k, v] : inputs) { + events[k.second] = v.get(); + } + for(auto& [k, event] : events) { + if(event != nullptr) { + if(nodeStates.find(event->nodeId) == nodeStates.end()) { + nodeStates.insert_or_assign(event->nodeId, NodeEventAggregation(properties.aggregationWindowSize)); + } + nodeStates.at(event->nodeId).add(*event); + } + } + auto outState = std::make_shared(); + bool shouldSend = false; + for(auto& [nodeId, nodeState] : 
nodeStates) { + auto numEvents = nodeState.state.events.size(); + if(numEvents >= properties.eventBatchSize) { + outState->nodeStates[nodeId] = nodeState.state; + if(!properties.sendEvents) outState->nodeStates[nodeId]->events.clear(); + nodeState.resetEvents(); + shouldSend = true; + } else { + outState->nodeStates[nodeId] = std::nullopt; + } + } + if(shouldSend) out.send(outState); } } } // namespace node diff --git a/src/utility/PipelineEventDispatcher.cpp b/src/utility/PipelineEventDispatcher.cpp new file mode 100644 index 000000000..ceb19451c --- /dev/null +++ b/src/utility/PipelineEventDispatcher.cpp @@ -0,0 +1,78 @@ +#include "depthai/utility/PipelineEventDispatcher.hpp" +#include + +namespace dai { +namespace utility { + +void PipelineEventDispatcher::addEvent(const std::string& source, PipelineEvent::EventType type) { + if(events.find(source) != events.end()) { + throw std::runtime_error("Event with name " + source + " already exists"); + } + events[source] = {type, {}, {}, false, std::nullopt}; +} +void PipelineEventDispatcher::startEvent(const std::string& source, std::optional metadata) { + if(events.find(source) == events.end()) { + throw std::runtime_error("Event with name " + source + " does not exist"); + } + auto& event = events[source]; + if(event.ongoing) { + throw std::runtime_error("Event with name " + source + " is already ongoing"); + } + event.timestamp = std::chrono::steady_clock::now(); + event.ongoing = true; + event.metadata = metadata; +} +void PipelineEventDispatcher::endEvent(const std::string& source) { + auto now = std::chrono::steady_clock::now(); + + if(events.find(source) == events.end()) { + throw std::runtime_error("Event with name " + source + " does not exist"); + } + auto& event = events[source]; + if(!event.ongoing) { + throw std::runtime_error("Event with name " + source + " has not been started"); + } + event.duration = std::chrono::duration_cast(now - event.timestamp); + event.ongoing = false; + + PipelineEvent pipelineEvent; + pipelineEvent.nodeId = nodeId; + pipelineEvent.duration = event.duration.count(); + pipelineEvent.type = event.type; + pipelineEvent.source = source; + + if(out) { + out->send(std::make_shared(pipelineEvent)); + } + + event.metadata = std::nullopt; +} +void PipelineEventDispatcher::pingEvent(const std::string& source) { + auto now = std::chrono::steady_clock::now(); + + if(events.find(source) == events.end()) { + throw std::runtime_error("Event with name " + source + " does not exist"); + } + auto& event = events[source]; + if(event.ongoing) { + event.duration = std::chrono::duration_cast(now - event.timestamp); + event.timestamp = now; + + PipelineEvent pipelineEvent; + pipelineEvent.nodeId = nodeId; + pipelineEvent.duration = event.duration.count(); + pipelineEvent.type = event.type; + pipelineEvent.source = source; + pipelineEvent.metadata = std::nullopt; + + if(out) { + out->send(std::make_shared(pipelineEvent)); + } + } else { + event.timestamp = now; + event.ongoing = true; + } +} + +} // namespace utility +} // namespace dai From fbd4137199f8b8213e49aa262969432ab372aedc Mon Sep 17 00:00:00 2001 From: asahtik Date: Mon, 22 Sep 2025 10:56:41 +0200 Subject: [PATCH 03/40] Add Pipeline event bindings --- bindings/python/CMakeLists.txt | 7 ++- bindings/python/src/DatatypeBindings.cpp | 8 ++- .../datatype/PipelineEventBindings.cpp | 55 ++++++++++++++++++ .../datatype/PipelineStateBindings.cpp | 57 +++++++++++++++++++ .../python/src/pipeline/node/NodeBindings.cpp | 2 + .../node/PipelineEventAggregationBindings.cpp | 38 
+++++++++++++ bindings/python/src/py_bindings.cpp | 2 + .../PipelineEventDispatcherBindings.cpp | 27 +++++++++ .../PipelineEventDispatcherBindings.hpp | 8 +++ .../node/PipelineEventAggregation.hpp | 9 +-- .../utility/PipelineEventDispatcher.hpp | 1 + src/pipeline/node/host/Replay.cpp | 4 ++ src/utility/ProtoSerialize.cpp | 2 + 13 files changed, 210 insertions(+), 10 deletions(-) create mode 100644 bindings/python/src/pipeline/datatype/PipelineEventBindings.cpp create mode 100644 bindings/python/src/pipeline/datatype/PipelineStateBindings.cpp create mode 100644 bindings/python/src/pipeline/node/PipelineEventAggregationBindings.cpp create mode 100644 bindings/python/src/utility/PipelineEventDispatcherBindings.cpp create mode 100644 bindings/python/src/utility/PipelineEventDispatcherBindings.hpp diff --git a/bindings/python/CMakeLists.txt b/bindings/python/CMakeLists.txt index 2a4bceee3..58209d883 100644 --- a/bindings/python/CMakeLists.txt +++ b/bindings/python/CMakeLists.txt @@ -98,7 +98,7 @@ set(SOURCE_LIST src/pipeline/node/UVCBindings.cpp src/pipeline/node/ToFBindings.cpp src/pipeline/node/PointCloudBindings.cpp - src/pipeline/node/SyncBindings.cpp + src/pipeline/node/PipelineEventAggregationBindings.cpp src/pipeline/node/MessageDemuxBindings.cpp src/pipeline/node/HostNodeBindings.cpp src/pipeline/node/RecordBindings.cpp @@ -136,6 +136,8 @@ set(SOURCE_LIST src/pipeline/datatype/PointCloudConfigBindings.cpp src/pipeline/datatype/ObjectTrackerConfigBindings.cpp src/pipeline/datatype/PointCloudDataBindings.cpp + src/pipeline/datatype/PipelineEventBindings.cpp + src/pipeline/datatype/PipelineStateBindings.cpp src/pipeline/datatype/TransformDataBindings.cpp src/pipeline/datatype/ImageAlignConfigBindings.cpp src/pipeline/datatype/ImgAnnotationsBindings.cpp @@ -149,6 +151,7 @@ set(SOURCE_LIST src/remote_connection/RemoteConnectionBindings.cpp src/utility/EventsManagerBindings.cpp + src/utility/PipelineEventDispatcherBindings.cpp ) if(DEPTHAI_MERGED_TARGET) list(APPEND SOURCE_LIST @@ -412,4 +415,4 @@ endif() ######################## if (DEPTHAI_PYTHON_ENABLE_EXAMPLES) add_subdirectory(../../examples/python ${CMAKE_BINARY_DIR}/examples/python) -endif() \ No newline at end of file +endif() diff --git a/bindings/python/src/DatatypeBindings.cpp b/bindings/python/src/DatatypeBindings.cpp index 69d1c922a..8276647fa 100644 --- a/bindings/python/src/DatatypeBindings.cpp +++ b/bindings/python/src/DatatypeBindings.cpp @@ -30,6 +30,8 @@ void bind_tracklets(pybind11::module& m, void* pCallstack); void bind_benchmarkreport(pybind11::module& m, void* pCallstack); void bind_pointcloudconfig(pybind11::module& m, void* pCallstack); void bind_pointclouddata(pybind11::module& m, void* pCallstack); +void bind_pipelineevent(pybind11::module& m, void* pCallstack); +void bind_pipelinestate(pybind11::module& m, void* pCallstack); void bind_transformdata(pybind11::module& m, void* pCallstack); void bind_rgbddata(pybind11::module& m, void* pCallstack); void bind_imagealignconfig(pybind11::module& m, void* pCallstack); @@ -71,6 +73,8 @@ void DatatypeBindings::addToCallstack(std::deque& callstack) { callstack.push_front(bind_benchmarkreport); callstack.push_front(bind_pointcloudconfig); callstack.push_front(bind_pointclouddata); + callstack.push_front(bind_pipelineevent); + callstack.push_front(bind_pipelinestate); callstack.push_front(bind_transformdata); callstack.push_front(bind_imagealignconfig); callstack.push_front(bind_imageannotations); @@ -131,5 +135,7 @@ void DatatypeBindings::bind(pybind11::module& m, void* 
pCallstack) { .value("PointCloudData", DatatypeEnum::PointCloudData) .value("ImageAlignConfig", DatatypeEnum::ImageAlignConfig) .value("ImgAnnotations", DatatypeEnum::ImgAnnotations) - .value("RGBDData", DatatypeEnum::RGBDData); + .value("RGBDData", DatatypeEnum::RGBDData) + .value("PipelineEvent", DatatypeEnum::PipelineEvent) + .value("PipelineState", DatatypeEnum::PipelineState); } diff --git a/bindings/python/src/pipeline/datatype/PipelineEventBindings.cpp b/bindings/python/src/pipeline/datatype/PipelineEventBindings.cpp new file mode 100644 index 000000000..ea9a7552c --- /dev/null +++ b/bindings/python/src/pipeline/datatype/PipelineEventBindings.cpp @@ -0,0 +1,55 @@ +#include +#include + +#include "DatatypeBindings.hpp" +#include "pipeline/CommonBindings.hpp" + +// depthai +#include "depthai/pipeline/datatype/PipelineEvent.hpp" + +// pybind +#include +#include + +// #include "spdlog/spdlog.h" + +void bind_pipelineevent(pybind11::module& m, void* pCallstack) { + using namespace dai; + + py::class_, Buffer, std::shared_ptr> pipelineEvent(m, "PipelineEvent", DOC(dai, PipelineEvent)); + py::enum_ pipelineEventType(pipelineEvent, "EventType", DOC(dai, PipelineEvent, EventType)); + + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + // Call the rest of the type defines, then perform the actual bindings + Callstack* callstack = (Callstack*)pCallstack; + auto cb = callstack->top(); + callstack->pop(); + cb(m, pCallstack); + // Actual bindings + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + + pipelineEventType.value("CUSTOM", PipelineEvent::EventType::CUSTOM) + .value("LOOP", PipelineEvent::EventType::LOOP) + .value("INPUT", PipelineEvent::EventType::INPUT) + .value("OUTPUT", PipelineEvent::EventType::OUTPUT) + .value("FUNC_CALL", PipelineEvent::EventType::FUNC_CALL); + + // Message + pipelineEvent.def(py::init<>()) + .def("__repr__", &PipelineEvent::str) + .def_readwrite("nodeId", &PipelineEvent::nodeId, DOC(dai, PipelineEvent, nodeId)) + .def_readwrite("metadata", &PipelineEvent::metadata, DOC(dai, PipelineEvent, metadata)) + .def_readwrite("duration", &PipelineEvent::duration, DOC(dai, PipelineEvent, duration)) + .def_readwrite("type", &PipelineEvent::type, DOC(dai, PipelineEvent, type)) + .def_readwrite("source", &PipelineEvent::source, DOC(dai, PipelineEvent, source)) + .def("getTimestamp", &PipelineEvent::Buffer::getTimestamp, DOC(dai, Buffer, getTimestamp)) + .def("getTimestampDevice", &PipelineEvent::Buffer::getTimestampDevice, DOC(dai, Buffer, getTimestampDevice)) + .def("getSequenceNum", &PipelineEvent::Buffer::getSequenceNum, DOC(dai, Buffer, getSequenceNum)) + .def("setTimestamp", &PipelineEvent::setTimestamp, DOC(dai, PipelineEvent, setTimestamp)) + .def("setTimestampDevice", &PipelineEvent::setTimestampDevice, DOC(dai, PipelineEvent, setTimestampDevice)) + .def("setSequenceNum", &PipelineEvent::setSequenceNum, DOC(dai, PipelineEvent, setSequenceNum)); +} diff --git a/bindings/python/src/pipeline/datatype/PipelineStateBindings.cpp b/bindings/python/src/pipeline/datatype/PipelineStateBindings.cpp new file mode 100644 index 000000000..8b59cc6ea --- /dev/null +++ b/bindings/python/src/pipeline/datatype/PipelineStateBindings.cpp @@ -0,0 +1,57 
@@ +#include +#include + +#include "DatatypeBindings.hpp" +#include "pipeline/CommonBindings.hpp" + +// depthai +#include "depthai/pipeline/datatype/PipelineState.hpp" + +// pybind +#include +#include + +// #include "spdlog/spdlog.h" + +void bind_pipelinestate(pybind11::module& m, void* pCallstack) { + using namespace dai; + + py::class_ nodeState(m, "NodeState", DOC(dai, NodeState)); + py::class_ nodeStateTiming(nodeState, "Timing", DOC(dai, NodeState, Timing)); + py::class_, Buffer, std::shared_ptr> pipelineState(m, "PipelineState", DOC(dai, PipelineState)); + + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + // Call the rest of the type defines, then perform the actual bindings + Callstack* callstack = (Callstack*)pCallstack; + auto cb = callstack->top(); + callstack->pop(); + cb(m, pCallstack); + // Actual bindings + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + + nodeStateTiming.def(py::init<>()) + .def("__repr__", &NodeState::Timing::str) + .def_readwrite("averageMicros", &NodeState::Timing::averageMicros, DOC(dai, NodeState, Timing, averageMicros)) + .def_readwrite("stdDevMicros", &NodeState::Timing::stdDevMicros, DOC(dai, NodeState, Timing, stdDevMicros)); + + nodeState.def(py::init<>()) + .def("__repr__", &NodeState::str) + .def_readwrite("events", &NodeState::events, DOC(dai, NodeState, events)) + .def_readwrite("timingsByType", &NodeState::timingsByType, DOC(dai, NodeState, timingsByType)) + .def_readwrite("timingsByInstance", &NodeState::timingsByInstance, DOC(dai, NodeState, timingsByInstance)); + + // Message + pipelineState.def(py::init<>()) + .def("__repr__", &PipelineState::str) + .def_readwrite("nodeStates", &PipelineState::nodeStates, DOC(dai, PipelineState, nodeStates)) + .def("getTimestamp", &PipelineState::Buffer::getTimestamp, DOC(dai, Buffer, getTimestamp)) + .def("getTimestampDevice", &PipelineState::Buffer::getTimestampDevice, DOC(dai, Buffer, getTimestampDevice)) + .def("getSequenceNum", &PipelineState::Buffer::getSequenceNum, DOC(dai, Buffer, getSequenceNum)) + .def("setTimestamp", &PipelineState::setTimestamp, DOC(dai, PipelineState, setTimestamp)) + .def("setTimestampDevice", &PipelineState::setTimestampDevice, DOC(dai, PipelineState, setTimestampDevice)) + .def("setSequenceNum", &PipelineState::setSequenceNum, DOC(dai, PipelineState, setSequenceNum)); +} diff --git a/bindings/python/src/pipeline/node/NodeBindings.cpp b/bindings/python/src/pipeline/node/NodeBindings.cpp index d58ac38a9..4237ba403 100644 --- a/bindings/python/src/pipeline/node/NodeBindings.cpp +++ b/bindings/python/src/pipeline/node/NodeBindings.cpp @@ -153,6 +153,7 @@ void bind_uvc(pybind11::module& m, void* pCallstack); void bind_thermal(pybind11::module& m, void* pCallstack); void bind_tof(pybind11::module& m, void* pCallstack); void bind_pointcloud(pybind11::module& m, void* pCallstack); +void bind_pipelineeventaggregation(pybind11::module& m, void* pCallstack); void bind_sync(pybind11::module& m, void* pCallstack); void bind_messagedemux(pybind11::module& m, void* pCallstack); void bind_hostnode(pybind11::module& m, void* pCallstack); @@ -202,6 +203,7 @@ void NodeBindings::addToCallstack(std::deque& callstack) { 
callstack.push_front(bind_thermal); callstack.push_front(bind_tof); callstack.push_front(bind_pointcloud); + callstack.push_front(bind_pipelineeventaggregation); callstack.push_front(bind_sync); callstack.push_front(bind_messagedemux); callstack.push_front(bind_hostnode); diff --git a/bindings/python/src/pipeline/node/PipelineEventAggregationBindings.cpp b/bindings/python/src/pipeline/node/PipelineEventAggregationBindings.cpp new file mode 100644 index 000000000..d28be67f7 --- /dev/null +++ b/bindings/python/src/pipeline/node/PipelineEventAggregationBindings.cpp @@ -0,0 +1,38 @@ +#include "Common.hpp" +#include "depthai/pipeline/node/PipelineEventAggregation.hpp" +#include "depthai/properties/PipelineEventAggregationProperties.hpp" + +void bind_pipelineeventaggregation(pybind11::module& m, void* pCallstack) { + using namespace dai; + using namespace dai::node; + + // Node and Properties declare upfront + py::class_ pipelineEventAggregationProperties( + m, "PipelineEventAggregationProperties", DOC(dai, PipelineEventAggregationProperties)); + auto pipelineEventAggregation = ADD_NODE(PipelineEventAggregation); + + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + // Call the rest of the type defines, then perform the actual bindings + Callstack* callstack = (Callstack*)pCallstack; + auto cb = callstack->top(); + callstack->pop(); + cb(m, pCallstack); + // Actual bindings + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + + // Properties + pipelineEventAggregationProperties.def_readwrite("aggregationWindowSize", &PipelineEventAggregationProperties::aggregationWindowSize) + .def_readwrite("eventBatchSize", &PipelineEventAggregationProperties::eventBatchSize) + .def_readwrite("sendEvents", &PipelineEventAggregationProperties::sendEvents); + + // Node + pipelineEventAggregation.def_readonly("out", &PipelineEventAggregation::out, DOC(dai, node, PipelineEventAggregation, out)) + .def_readonly("inputs", &PipelineEventAggregation::inputs, DOC(dai, node, PipelineEventAggregation, inputs)) + .def("setRunOnHost", &PipelineEventAggregation::setRunOnHost, py::arg("runOnHost"), DOC(dai, node, PipelineEventAggregation, setRunOnHost)) + .def("runOnHost", &PipelineEventAggregation::runOnHost, DOC(dai, node, PipelineEventAggregation, runOnHost)); + daiNodeModule.attr("PipelineEventAggregation").attr("Properties") = pipelineEventAggregationProperties; +} diff --git a/bindings/python/src/py_bindings.cpp b/bindings/python/src/py_bindings.cpp index 122d03016..cbbb4400e 100644 --- a/bindings/python/src/py_bindings.cpp +++ b/bindings/python/src/py_bindings.cpp @@ -37,6 +37,7 @@ #include "pipeline/node/NodeBindings.hpp" #include "remote_connection/RemoteConnectionBindings.hpp" #include "utility/EventsManagerBindings.hpp" +#include "utility/PipelineEventDispatcherBindings.hpp" #ifdef DEPTHAI_HAVE_OPENCV_SUPPORT #include #endif @@ -87,6 +88,7 @@ PYBIND11_MODULE(depthai, m) callstack.push_front(&CalibrationHandlerBindings::bind); callstack.push_front(&ZooBindings::bind); callstack.push_front(&EventsManagerBindings::bind); + callstack.push_front(&PipelineEventDispatcherBindings::bind); callstack.push_front(&RemoteConnectionBindings::bind); 
callstack.push_front(&FilterParamsBindings::bind); // end of the callstack diff --git a/bindings/python/src/utility/PipelineEventDispatcherBindings.cpp b/bindings/python/src/utility/PipelineEventDispatcherBindings.cpp new file mode 100644 index 000000000..42f2b995f --- /dev/null +++ b/bindings/python/src/utility/PipelineEventDispatcherBindings.cpp @@ -0,0 +1,27 @@ +#include "PipelineEventDispatcherBindings.hpp" +#include "depthai/utility/PipelineEventDispatcher.hpp" + +void PipelineEventDispatcherBindings::bind(pybind11::module& m, void* pCallstack) { + using namespace dai; + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + // Call the rest of the type defines, then perform the actual bindings + Callstack* callstack = (Callstack*)pCallstack; + auto cb = callstack->top(); + callstack->pop(); + cb(m, pCallstack); + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + + using namespace dai::utility; + auto pipelineEventDispatcher = py::class_(m, "PipelineEventDispatcher"); + + pipelineEventDispatcher + .def(py::init(), py::arg("nodeId"), py::arg("output")) + .def("addEvent", &PipelineEventDispatcher::addEvent, py::arg("source"), py::arg("type"), DOC(dai, utility, PipelineEventDispatcher, addEvent)) + .def("startEvent", &PipelineEventDispatcher::startEvent, py::arg("source"), py::arg("metadata") = std::nullopt, DOC(dai, utility, PipelineEventDispatcher, startEvent)) + .def("endEvent", &PipelineEventDispatcher::endEvent, py::arg("source"), DOC(dai, utility, PipelineEventDispatcher, endEvent)) + .def("pingEvent", &PipelineEventDispatcher::pingEvent, py::arg("source"), DOC(dai, utility, PipelineEventDispatcher, pingEvent)); +} diff --git a/bindings/python/src/utility/PipelineEventDispatcherBindings.hpp b/bindings/python/src/utility/PipelineEventDispatcherBindings.hpp new file mode 100644 index 000000000..36e074193 --- /dev/null +++ b/bindings/python/src/utility/PipelineEventDispatcherBindings.hpp @@ -0,0 +1,8 @@ +#pragma once + +// pybind +#include "pybind11_common.hpp" + +struct PipelineEventDispatcherBindings { + static void bind(pybind11::module& m, void* pCallstack); +}; diff --git a/include/depthai/pipeline/node/PipelineEventAggregation.hpp b/include/depthai/pipeline/node/PipelineEventAggregation.hpp index f2636a4a9..9d9910fae 100644 --- a/include/depthai/pipeline/node/PipelineEventAggregation.hpp +++ b/include/depthai/pipeline/node/PipelineEventAggregation.hpp @@ -25,14 +25,9 @@ class PipelineEventAggregation : public DeviceNodeCRTP getMessage(const std::shared_ptr getProtoMessage(utility::ByteP case DatatypeEnum::DynamicCalibrationResult: case DatatypeEnum::CalibrationQuality: case DatatypeEnum::CoverageData: + case DatatypeEnum::PipelineEvent: + case DatatypeEnum::PipelineState: throw std::runtime_error("Cannot replay message type: " + std::to_string((int)datatype)); } return {}; diff --git a/src/utility/ProtoSerialize.cpp b/src/utility/ProtoSerialize.cpp index 300600579..d94870502 100644 --- a/src/utility/ProtoSerialize.cpp +++ b/src/utility/ProtoSerialize.cpp @@ -170,6 +170,8 @@ bool deserializationSupported(DatatypeEnum datatype) { case DatatypeEnum::DynamicCalibrationResult: case DatatypeEnum::CalibrationQuality: case DatatypeEnum::CoverageData: + 
case DatatypeEnum::PipelineEvent: + case DatatypeEnum::PipelineState: return false; } return false; From 9876101176eaa8430120a66af54ea9c6ff691234 Mon Sep 17 00:00:00 2001 From: asahtik Date: Tue, 23 Sep 2025 15:44:49 +0200 Subject: [PATCH 04/40] Integrate pipeline events --- CMakeLists.txt | 2 +- .../datatype/PipelineEventBindings.cpp | 1 + .../node/PipelineEventAggregationBindings.cpp | 4 +-- .../PipelineEventDispatcherBindings.cpp | 3 +- include/depthai/pipeline/MessageQueue.hpp | 11 +++++++- include/depthai/pipeline/Node.hpp | 11 ++++++-- include/depthai/pipeline/ThreadedNode.hpp | 10 ++++++- .../pipeline/datatype/PipelineEvent.hpp | 3 +- .../pipeline/datatype/PipelineState.hpp | 2 +- .../PipelineEventAggregation.hpp | 2 +- .../PipelineEventAggregationProperties.hpp | 0 include/depthai/utility/CircularBuffer.hpp | 2 +- .../utility/PipelineEventDispatcher.hpp | 21 +++++++++----- .../PipelineEventDispatcherInterface.hpp | 19 +++++++++++++ src/pipeline/MessageQueue.cpp | 11 +++++++- src/pipeline/Node.cpp | 4 +++ src/pipeline/Pipeline.cpp | 28 +++++++++++++++++++ src/pipeline/ThreadedNode.cpp | 10 ++++++- .../PipelineEventAggregation.cpp | 23 ++++++++------- src/utility/PipelineEventDispatcher.cpp | 14 ++++++++++ 20 files changed, 148 insertions(+), 33 deletions(-) rename include/depthai/pipeline/node/{ => internal}/PipelineEventAggregation.hpp (93%) rename include/depthai/properties/{ => internal}/PipelineEventAggregationProperties.hpp (100%) create mode 100644 include/depthai/utility/PipelineEventDispatcherInterface.hpp rename src/pipeline/node/{ => internal}/PipelineEventAggregation.cpp (90%) diff --git a/CMakeLists.txt b/CMakeLists.txt index bd1ac83ab..162675b4c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -279,6 +279,7 @@ set(TARGET_CORE_SOURCES src/pipeline/ThreadedNode.cpp src/pipeline/DeviceNode.cpp src/pipeline/DeviceNodeGroup.cpp + src/pipeline/node/internal/PipelineEventAggregation.cpp src/pipeline/node/internal/XLinkIn.cpp src/pipeline/node/internal/XLinkOut.cpp src/pipeline/node/ColorCamera.cpp @@ -318,7 +319,6 @@ set(TARGET_CORE_SOURCES src/pipeline/node/host/RGBD.cpp src/pipeline/datatype/DatatypeEnum.cpp src/pipeline/node/PointCloud.cpp - src/pipeline/node/PipelineEventAggregation.cpp src/pipeline/datatype/Buffer.cpp src/pipeline/datatype/ImgFrame.cpp src/pipeline/datatype/ImgTransformations.cpp diff --git a/bindings/python/src/pipeline/datatype/PipelineEventBindings.cpp b/bindings/python/src/pipeline/datatype/PipelineEventBindings.cpp index ea9a7552c..89478d816 100644 --- a/bindings/python/src/pipeline/datatype/PipelineEventBindings.cpp +++ b/bindings/python/src/pipeline/datatype/PipelineEventBindings.cpp @@ -43,6 +43,7 @@ void bind_pipelineevent(pybind11::module& m, void* pCallstack) { .def("__repr__", &PipelineEvent::str) .def_readwrite("nodeId", &PipelineEvent::nodeId, DOC(dai, PipelineEvent, nodeId)) .def_readwrite("metadata", &PipelineEvent::metadata, DOC(dai, PipelineEvent, metadata)) + .def_readwrite("timestamp", &PipelineEvent::timestamp, DOC(dai, PipelineEvent, timestamp)) .def_readwrite("duration", &PipelineEvent::duration, DOC(dai, PipelineEvent, duration)) .def_readwrite("type", &PipelineEvent::type, DOC(dai, PipelineEvent, type)) .def_readwrite("source", &PipelineEvent::source, DOC(dai, PipelineEvent, source)) diff --git a/bindings/python/src/pipeline/node/PipelineEventAggregationBindings.cpp b/bindings/python/src/pipeline/node/PipelineEventAggregationBindings.cpp index d28be67f7..c9e675f30 100644 --- 
a/bindings/python/src/pipeline/node/PipelineEventAggregationBindings.cpp +++ b/bindings/python/src/pipeline/node/PipelineEventAggregationBindings.cpp @@ -1,6 +1,6 @@ #include "Common.hpp" -#include "depthai/pipeline/node/PipelineEventAggregation.hpp" -#include "depthai/properties/PipelineEventAggregationProperties.hpp" +#include "depthai/pipeline/node/internal/PipelineEventAggregation.hpp" +#include "depthai/properties/internal/PipelineEventAggregationProperties.hpp" void bind_pipelineeventaggregation(pybind11::module& m, void* pCallstack) { using namespace dai; diff --git a/bindings/python/src/utility/PipelineEventDispatcherBindings.cpp b/bindings/python/src/utility/PipelineEventDispatcherBindings.cpp index 42f2b995f..422edd72a 100644 --- a/bindings/python/src/utility/PipelineEventDispatcherBindings.cpp +++ b/bindings/python/src/utility/PipelineEventDispatcherBindings.cpp @@ -19,7 +19,8 @@ void PipelineEventDispatcherBindings::bind(pybind11::module& m, void* pCallstack auto pipelineEventDispatcher = py::class_(m, "PipelineEventDispatcher"); pipelineEventDispatcher - .def(py::init(), py::arg("nodeId"), py::arg("output")) + .def(py::init(), py::arg("output")) + .def("setNodeId", &PipelineEventDispatcher::setNodeId, py::arg("id"), DOC(dai, utility, PipelineEventDispatcher, setNodeId)) .def("addEvent", &PipelineEventDispatcher::addEvent, py::arg("source"), py::arg("type"), DOC(dai, utility, PipelineEventDispatcher, addEvent)) .def("startEvent", &PipelineEventDispatcher::startEvent, py::arg("source"), py::arg("metadata") = std::nullopt, DOC(dai, utility, PipelineEventDispatcher, startEvent)) .def("endEvent", &PipelineEventDispatcher::endEvent, py::arg("source"), DOC(dai, utility, PipelineEventDispatcher, endEvent)) diff --git a/include/depthai/pipeline/MessageQueue.hpp b/include/depthai/pipeline/MessageQueue.hpp index e7667e623..515c5d773 100644 --- a/include/depthai/pipeline/MessageQueue.hpp +++ b/include/depthai/pipeline/MessageQueue.hpp @@ -7,6 +7,7 @@ // project #include "depthai/pipeline/datatype/ADatatype.hpp" #include "depthai/utility/LockingQueue.hpp" +#include "depthai/utility/PipelineEventDispatcherInterface.hpp" // shared namespace dai { @@ -37,11 +38,15 @@ class MessageQueue : public std::enable_shared_from_this { private: void callCallbacks(std::shared_ptr msg); + std::shared_ptr pipelineEventDispatcher; public: // DataOutputQueue constructor explicit MessageQueue(unsigned int maxSize = 16, bool blocking = true); - explicit MessageQueue(std::string name, unsigned int maxSize = 16, bool blocking = true); + explicit MessageQueue(std::string name, + unsigned int maxSize = 16, + bool blocking = true, + std::shared_ptr pipelineEventDispatcher = nullptr); MessageQueue(const MessageQueue& c) : enable_shared_from_this(c), queue(c.queue), name(c.name), callbacks(c.callbacks), uniqueCallbackId(c.uniqueCallbackId){}; @@ -196,11 +201,13 @@ class MessageQueue : public std::enable_shared_from_this { */ template std::shared_ptr tryGet() { + if(pipelineEventDispatcher) pipelineEventDispatcher->startEvent(name); if(queue.isDestroyed()) { throw QueueException(CLOSED_QUEUE_MESSAGE); } std::shared_ptr val = nullptr; if(!queue.tryPop(val)) return nullptr; + if(pipelineEventDispatcher) pipelineEventDispatcher->endEvent(name); return std::dynamic_pointer_cast(val); } @@ -220,10 +227,12 @@ class MessageQueue : public std::enable_shared_from_this { */ template std::shared_ptr get() { + if(pipelineEventDispatcher) pipelineEventDispatcher->startEvent(name); std::shared_ptr val = nullptr; 
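        // The INPUT event opened above stays open while waitAndPop() blocks below, so the
        // duration reported by endEvent() measures the time spent waiting on this queue.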
if(!queue.waitAndPop(val)) { throw QueueException(CLOSED_QUEUE_MESSAGE); } + if(pipelineEventDispatcher) pipelineEventDispatcher->endEvent(name); return std::dynamic_pointer_cast(val); } diff --git a/include/depthai/pipeline/Node.hpp b/include/depthai/pipeline/Node.hpp index a491bf894..aa5bdbc33 100644 --- a/include/depthai/pipeline/Node.hpp +++ b/include/depthai/pipeline/Node.hpp @@ -20,6 +20,7 @@ #include "depthai/capabilities/Capability.hpp" #include "depthai/pipeline/datatype/DatatypeEnum.hpp" #include "depthai/properties/Properties.hpp" +#include "depthai/utility/PipelineEventDispatcherInterface.hpp" // libraries #include @@ -83,6 +84,8 @@ class Node : public std::enable_shared_from_this { std::vector inputMapRefs; std::vector*> nodeRefs; + std::shared_ptr pipelineEventDispatcher; + // helpers for setting refs void setOutputRefs(std::initializer_list l); void setOutputRefs(Output* outRef); @@ -129,11 +132,12 @@ class Node : public std::enable_shared_from_this { std::vector queueConnections; Type type = Type::MSender; // Slave sender not supported yet OutputDescription desc; + std::shared_ptr pipelineEventDispatcher; public: // std::vector possibleCapabilities; - Output(Node& par, OutputDescription desc, bool ref = true) : parent(par), desc(std::move(desc)) { + Output(Node& par, OutputDescription desc, bool ref = true) : parent(par), desc(std::move(desc)), pipelineEventDispatcher(par.pipelineEventDispatcher) { // Place oneself to the parents references if(ref) { par.setOutputRefs(this); @@ -141,6 +145,9 @@ class Node : public std::enable_shared_from_this { if(getName().empty()) { setName(par.createUniqueOutputName()); } + if(pipelineEventDispatcher && getName() != "pipelineEventOutput") { + pipelineEventDispatcher->addEvent(getName(), PipelineEvent::EventType::OUTPUT); + } } Node& getParent() { @@ -344,7 +351,7 @@ class Node : public std::enable_shared_from_this { public: std::vector possibleDatatypes; explicit Input(Node& par, InputDescription desc, bool ref = true) - : MessageQueue(std::move(desc.name), desc.queueSize, desc.blocking), + : MessageQueue(std::move(desc.name), desc.queueSize, desc.blocking, par.pipelineEventDispatcher), parent(par), waitForMessage(desc.waitForMessage), possibleDatatypes(std::move(desc.types)) { diff --git a/include/depthai/pipeline/ThreadedNode.hpp b/include/depthai/pipeline/ThreadedNode.hpp index e21768b83..30c2394a8 100644 --- a/include/depthai/pipeline/ThreadedNode.hpp +++ b/include/depthai/pipeline/ThreadedNode.hpp @@ -5,14 +5,22 @@ #include "depthai/utility/AtomicBool.hpp" #include "depthai/utility/JoiningThread.hpp" #include "depthai/utility/spimpl.h" +#include "depthai/utility/PipelineEventDispatcher.hpp" namespace dai { class ThreadedNode : public Node { + friend class PipelineImpl; + private: JoiningThread thread; AtomicBool running{false}; + protected: + Output pipelineEventOutput{*this, {"pipelineEventOutput", DEFAULT_GROUP, {{{DatatypeEnum::PipelineEvent, false}}}}}; + + void initPipelineEventDispatcher(int64_t nodeId); + public: using Node::Node; ThreadedNode(); @@ -43,7 +51,7 @@ class ThreadedNode : public Node { virtual void run() = 0; // check if still running - bool isRunning() const; + bool isRunning(); /** * @brief Sets the logging severity level for this node. 
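For orientation while reading this commit, a minimal sketch of how a custom node could drive the dispatcher from its processing loop. MyNode, process(), input and output are illustrative names only, and the sketch assumes the pipelineEventDispatcher member added to Node is visible to the subclass; the "_mainLoop" LOOP event, the INPUT hook in MessageQueue::get() and the OUTPUT hook in Output::send() are the ones wired up in this series.

    void MyNode::run() {
        // Register a custom span once; addEvent() throws if the same name is registered twice.
        pipelineEventDispatcher->addEvent("process", dai::PipelineEvent::EventType::FUNC_CALL);
        while(isRunning()) {                          // pings the "_mainLoop" LOOP event
            auto msg = input.get<dai::Buffer>();      // reports an INPUT event for this queue
            pipelineEventDispatcher->startEvent("process");
            auto result = process(msg);               // illustrative work
            pipelineEventDispatcher->endEvent("process");  // emits a PipelineEvent with the measured duration
            output.send(result);                      // reports an OUTPUT event
        }
    }
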
diff --git a/include/depthai/pipeline/datatype/PipelineEvent.hpp b/include/depthai/pipeline/datatype/PipelineEvent.hpp index 9ea1b92f2..9b2b67672 100644 --- a/include/depthai/pipeline/datatype/PipelineEvent.hpp +++ b/include/depthai/pipeline/datatype/PipelineEvent.hpp @@ -25,6 +25,7 @@ class PipelineEvent : public Buffer { int64_t nodeId = -1; std::optional metadata; + uint64_t timestamp {0}; uint64_t duration {0}; // Duration in microseconds EventType type = EventType::CUSTOM; std::string source; @@ -34,7 +35,7 @@ class PipelineEvent : public Buffer { datatype = DatatypeEnum::PipelineEvent; }; - DEPTHAI_SERIALIZE(PipelineEvent, Buffer::ts, Buffer::tsDevice, Buffer::sequenceNum, nodeId, metadata, duration, type, source); + DEPTHAI_SERIALIZE(PipelineEvent, Buffer::ts, Buffer::tsDevice, Buffer::sequenceNum, nodeId, metadata, timestamp, duration, type, source); }; } // namespace dai diff --git a/include/depthai/pipeline/datatype/PipelineState.hpp b/include/depthai/pipeline/datatype/PipelineState.hpp index 64ddbbf34..159dfdbc3 100644 --- a/include/depthai/pipeline/datatype/PipelineState.hpp +++ b/include/depthai/pipeline/datatype/PipelineState.hpp @@ -30,7 +30,7 @@ class PipelineState : public Buffer { PipelineState() = default; virtual ~PipelineState() = default; - std::unordered_map> nodeStates; + std::unordered_map nodeStates; void serialize(std::vector& metadata, DatatypeEnum& datatype) const override { metadata = utility::serialize(*this); diff --git a/include/depthai/pipeline/node/PipelineEventAggregation.hpp b/include/depthai/pipeline/node/internal/PipelineEventAggregation.hpp similarity index 93% rename from include/depthai/pipeline/node/PipelineEventAggregation.hpp rename to include/depthai/pipeline/node/internal/PipelineEventAggregation.hpp index 9d9910fae..3dbce6149 100644 --- a/include/depthai/pipeline/node/PipelineEventAggregation.hpp +++ b/include/depthai/pipeline/node/internal/PipelineEventAggregation.hpp @@ -3,7 +3,7 @@ #include // shared -#include +#include namespace dai { namespace node { diff --git a/include/depthai/properties/PipelineEventAggregationProperties.hpp b/include/depthai/properties/internal/PipelineEventAggregationProperties.hpp similarity index 100% rename from include/depthai/properties/PipelineEventAggregationProperties.hpp rename to include/depthai/properties/internal/PipelineEventAggregationProperties.hpp diff --git a/include/depthai/utility/CircularBuffer.hpp b/include/depthai/utility/CircularBuffer.hpp index a0f631d60..6e386eb19 100644 --- a/include/depthai/utility/CircularBuffer.hpp +++ b/include/depthai/utility/CircularBuffer.hpp @@ -11,7 +11,7 @@ class CircularBuffer { CircularBuffer(size_t size) : maxSize(size) { buffer.reserve(size); } - void add(int value) { + void add(T value) { if(buffer.size() < maxSize) { buffer.push_back(value); } else { diff --git a/include/depthai/utility/PipelineEventDispatcher.hpp b/include/depthai/utility/PipelineEventDispatcher.hpp index a26d3546f..78a47ab8c 100644 --- a/include/depthai/utility/PipelineEventDispatcher.hpp +++ b/include/depthai/utility/PipelineEventDispatcher.hpp @@ -8,11 +8,12 @@ #include "depthai/pipeline/Node.hpp" #include "depthai/pipeline/datatype/PipelineEvent.hpp" +#include "PipelineEventDispatcherInterface.hpp" namespace dai { namespace utility { -class PipelineEventDispatcher { +class PipelineEventDispatcher : public PipelineEventDispatcherInterface { struct EventStatus { PipelineEvent::EventType type; std::chrono::microseconds duration; @@ -21,19 +22,25 @@ class PipelineEventDispatcher { 
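        // Optional metadata captured by startEvent() and cleared again when the event ends.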
std::optional metadata; }; - int64_t nodeId; + int64_t nodeId = -1; std::unordered_map events; Node::Output* out = nullptr; + void checkNodeId(); + + uint32_t sequenceNum = 0; + public: PipelineEventDispatcher() = delete; - PipelineEventDispatcher(int64_t nodeId, Node::Output* output) : nodeId(nodeId), out(output) {} + PipelineEventDispatcher(Node::Output* output) : out(output) {} + + void setNodeId(int64_t id) override; - void addEvent(const std::string& source, PipelineEvent::EventType type); + void addEvent(const std::string& source, PipelineEvent::EventType type) override; - void startEvent(const std::string& source, std::optional metadata = std::nullopt); // Start event with a start and an end - void endEvent(const std::string& source); // Stop event with a start and an end - void pingEvent(const std::string& source); // Event where stop and start are the same (eg. loop) + void startEvent(const std::string& source, std::optional metadata = std::nullopt) override; // Start event with a start and an end + void endEvent(const std::string& source) override; // Stop event with a start and an end + void pingEvent(const std::string& source) override; // Event where stop and start are the same (eg. loop) }; } // namespace utility diff --git a/include/depthai/utility/PipelineEventDispatcherInterface.hpp b/include/depthai/utility/PipelineEventDispatcherInterface.hpp new file mode 100644 index 000000000..34bf526f0 --- /dev/null +++ b/include/depthai/utility/PipelineEventDispatcherInterface.hpp @@ -0,0 +1,19 @@ +#pragma once + +#include "depthai/pipeline/datatype/PipelineEvent.hpp" + +namespace dai { +namespace utility { + +class PipelineEventDispatcherInterface { +public: + virtual ~PipelineEventDispatcherInterface() = default; + virtual void setNodeId(int64_t id) = 0; + virtual void addEvent(const std::string& source, PipelineEvent::EventType type) = 0; + virtual void startEvent(const std::string& source, std::optional metadata = std::nullopt) = 0; // Start event with a start and an end + virtual void endEvent(const std::string& source) = 0; // Stop event with a start and an end + virtual void pingEvent(const std::string& source) = 0; // Event where stop and start are the same (eg. 
loop) +}; + +} // namespace utility +} // namespace dai diff --git a/src/pipeline/MessageQueue.cpp b/src/pipeline/MessageQueue.cpp index 28f082f16..b8459fb46 100644 --- a/src/pipeline/MessageQueue.cpp +++ b/src/pipeline/MessageQueue.cpp @@ -14,10 +14,19 @@ // Additions #include "spdlog/fmt/bin_to_hex.h" #include "spdlog/fmt/chrono.h" +#include "utility/PipelineEventDispatcherInterface.hpp" namespace dai { -MessageQueue::MessageQueue(std::string name, unsigned int maxSize, bool blocking) : queue(maxSize, blocking), name(std::move(name)) {} +MessageQueue::MessageQueue(std::string name, + unsigned int maxSize, + bool blocking, + std::shared_ptr pipelineEventDispatcher) + : queue(maxSize, blocking), name(std::move(name)), pipelineEventDispatcher(pipelineEventDispatcher) { + if(pipelineEventDispatcher) { + pipelineEventDispatcher->addEvent(name, PipelineEvent::EventType::INPUT); + } +} MessageQueue::MessageQueue(unsigned int maxSize, bool blocking) : queue(maxSize, blocking) {} diff --git a/src/pipeline/Node.cpp b/src/pipeline/Node.cpp index 0bec10444..bfc024dca 100644 --- a/src/pipeline/Node.cpp +++ b/src/pipeline/Node.cpp @@ -239,9 +239,11 @@ void Node::Output::send(const std::shared_ptr& msg) { // } // } // } + if(pipelineEventDispatcher) pipelineEventDispatcher->startEvent(getName()); for(auto& messageQueue : connectedInputs) { messageQueue->send(msg); } + if(pipelineEventDispatcher) pipelineEventDispatcher->endEvent(getName()); } bool Node::Output::trySend(const std::shared_ptr& msg) { @@ -261,9 +263,11 @@ bool Node::Output::trySend(const std::shared_ptr& msg) { // } // } // } + if(pipelineEventDispatcher) pipelineEventDispatcher->startEvent(getName()); for(auto& messageQueue : connectedInputs) { success &= messageQueue->trySend(msg); } + if(pipelineEventDispatcher) pipelineEventDispatcher->endEvent(getName()); return success; } diff --git a/src/pipeline/Pipeline.cpp b/src/pipeline/Pipeline.cpp index 7076d2504..f92523b42 100644 --- a/src/pipeline/Pipeline.cpp +++ b/src/pipeline/Pipeline.cpp @@ -4,6 +4,7 @@ #include "depthai/device/CalibrationHandler.hpp" #include "depthai/pipeline/ThreadedHostNode.hpp" +#include "depthai/pipeline/node/internal/PipelineEventAggregation.hpp" #include "depthai/pipeline/node/internal/XLinkIn.hpp" #include "depthai/pipeline/node/internal/XLinkInHost.hpp" #include "depthai/pipeline/node/internal/XLinkOut.hpp" @@ -626,6 +627,33 @@ void PipelineImpl::build() { } } + // Create pipeline event aggregator node and link + // Check if any nodes are on host or device + bool hasHostNodes = false; + bool hasDeviceNodes = false; + for(const auto& node : getAllNodes()) { + if(node->runOnHost()) { + hasHostNodes = true; + } else { + hasDeviceNodes = true; + } + } + std::shared_ptr hostEventAgg = nullptr; + std::shared_ptr deviceEventAgg = nullptr; + if(hasHostNodes) hostEventAgg = parent.create(); + if(hasDeviceNodes) deviceEventAgg = parent.create(); + for(auto& node : getAllNodes()) { + auto threadedNode = std::dynamic_pointer_cast(node); + if(threadedNode) { + if(node->runOnHost() && hostEventAgg) { + threadedNode->pipelineEventOutput.link(hostEventAgg->inputs[fmt::format("{} - {}", node->getName(), node->id)]); + } else if(!node->runOnHost() && deviceEventAgg) { + threadedNode->pipelineEventOutput.link(deviceEventAgg->inputs[fmt::format("{} - {}", node->getName(), node->id)]); + } + } + } + // TODO: link outputs of event aggregation nodes + // Go through the build stages sequentially for(const auto& node : getAllNodes()) { node->buildStage1(); diff --git 
a/src/pipeline/ThreadedNode.cpp b/src/pipeline/ThreadedNode.cpp index 3f099b782..8455ed571 100644 --- a/src/pipeline/ThreadedNode.cpp +++ b/src/pipeline/ThreadedNode.cpp @@ -18,6 +18,12 @@ ThreadedNode::ThreadedNode() { level = Logging::parseLevel(envLevel); } pimpl->logger->set_level(level); + pipelineEventDispatcher = std::make_shared(&pipelineEventOutput); +} + +void ThreadedNode::initPipelineEventDispatcher(int64_t nodeId) { + pipelineEventDispatcher->setNodeId(nodeId); + pipelineEventDispatcher->addEvent("_mainLoop", PipelineEvent::EventType::LOOP); } void ThreadedNode::start() { @@ -30,6 +36,7 @@ void ThreadedNode::start() { running = true; thread = std::thread([this]() { try { + initPipelineEventDispatcher(this->id); run(); } catch(const MessageQueue::QueueException& ex) { // catch the exception and stop the node @@ -80,7 +87,8 @@ dai::LogLevel ThreadedNode::getLogLevel() const { return spdlogLevelToLogLevel(pimpl->logger->level(), LogLevel::WARN); } -bool ThreadedNode::isRunning() const { +bool ThreadedNode::isRunning() { + this->pipelineEventDispatcher->pingEvent("_mainLoop"); return running; } diff --git a/src/pipeline/node/PipelineEventAggregation.cpp b/src/pipeline/node/internal/PipelineEventAggregation.cpp similarity index 90% rename from src/pipeline/node/PipelineEventAggregation.cpp rename to src/pipeline/node/internal/PipelineEventAggregation.cpp index bb4be94b9..0f1ed3aa7 100644 --- a/src/pipeline/node/PipelineEventAggregation.cpp +++ b/src/pipeline/node/internal/PipelineEventAggregation.cpp @@ -1,4 +1,4 @@ -#include "depthai/pipeline/node/PipelineEventAggregation.hpp" +#include "depthai/pipeline/node/internal/PipelineEventAggregation.hpp" #include "depthai/pipeline/datatype/PipelineEvent.hpp" #include "depthai/pipeline/datatype/PipelineState.hpp" @@ -11,14 +11,19 @@ class NodeEventAggregation { int windowSize; public: - NodeEventAggregation(int windowSize) : windowSize(windowSize) {} + NodeEventAggregation(int windowSize) : windowSize(windowSize), eventsBuffer(windowSize) {} NodeState state; + utility::CircularBuffer eventsBuffer; std::unordered_map>> timingsBufferByType; std::unordered_map>> timingsBufferByInstance; + uint32_t eventCount = 0; + void add(PipelineEvent& event) { // TODO optimize avg - state.events.push_back(event); + ++eventCount; + eventsBuffer.add(event); + state.events = eventsBuffer.getBuffer(); if(timingsBufferByType.find(event.type) == timingsBufferByType.end()) { timingsBufferByType[event.type] = std::make_unique>(windowSize); } @@ -65,9 +70,6 @@ class NodeEventAggregation { state.timingsByInstance[event.source].stdDevMicros = (uint64_t)(std::sqrt(variance)); } } - void resetEvents() { - state.events.clear(); - } }; void PipelineEventAggregation::setRunOnHost(bool runOnHost) { @@ -99,14 +101,11 @@ void PipelineEventAggregation::run() { auto outState = std::make_shared(); bool shouldSend = false; for(auto& [nodeId, nodeState] : nodeStates) { - auto numEvents = nodeState.state.events.size(); - if(numEvents >= properties.eventBatchSize) { + if(nodeState.eventCount >= properties.eventBatchSize) { outState->nodeStates[nodeId] = nodeState.state; - if(!properties.sendEvents) outState->nodeStates[nodeId]->events.clear(); - nodeState.resetEvents(); + if(!properties.sendEvents) outState->nodeStates[nodeId].events.clear(); shouldSend = true; - } else { - outState->nodeStates[nodeId] = std::nullopt; + nodeState.eventCount = 0; } } if(shouldSend) out.send(outState); diff --git a/src/utility/PipelineEventDispatcher.cpp b/src/utility/PipelineEventDispatcher.cpp 
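
A minimal sketch of how a user-defined threaded node might report events through the dispatcher introduced above (the node class and its doWork() are hypothetical, and this assumes the dispatcher member is visible to subclasses; only addEvent/startEvent/endEvent and the "_mainLoop" ping inside isRunning() come from these patches):

    #include "depthai/pipeline/ThreadedHostNode.hpp"
    #include "depthai/pipeline/datatype/PipelineEvent.hpp"

    // Hypothetical host node; only the event-dispatcher calls are the point here.
    class MyNode : public dai::node::CustomThreadedNode<MyNode> {
       public:
        constexpr static const char* NAME = "MyNode";

        void run() override {
            // Custom events must be registered once before they are started/ended,
            // otherwise startEvent()/endEvent() throw.
            pipelineEventDispatcher->addEvent("process", dai::PipelineEvent::EventType::CUSTOM);
            // Each isRunning() call pings the built-in "_mainLoop" LOOP event,
            // so per-iteration loop timing is collected without extra user code.
            while(isRunning()) {
                pipelineEventDispatcher->startEvent("process");
                doWork();  // hypothetical per-iteration work
                pipelineEventDispatcher->endEvent("process");
                // INPUT/OUTPUT events for this node's queues are emitted by the
                // instrumented MessageQueue and Node::Output themselves.
            }
        }

       private:
        void doWork() {}  // placeholder
    };
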
index ceb19451c..b0c59ef1d 100644 --- a/src/utility/PipelineEventDispatcher.cpp +++ b/src/utility/PipelineEventDispatcher.cpp @@ -4,6 +4,14 @@ namespace dai { namespace utility { +void PipelineEventDispatcher::checkNodeId() { + if(nodeId == -1) { + throw std::runtime_error("Node ID not set for PipelineEventDispatcher"); + } +} +void PipelineEventDispatcher::setNodeId(int64_t id) { + nodeId = id; +} void PipelineEventDispatcher::addEvent(const std::string& source, PipelineEvent::EventType type) { if(events.find(source) != events.end()) { throw std::runtime_error("Event with name " + source + " already exists"); @@ -11,6 +19,7 @@ void PipelineEventDispatcher::addEvent(const std::string& source, PipelineEvent: events[source] = {type, {}, {}, false, std::nullopt}; } void PipelineEventDispatcher::startEvent(const std::string& source, std::optional metadata) { + checkNodeId(); if(events.find(source) == events.end()) { throw std::runtime_error("Event with name " + source + " does not exist"); } @@ -23,6 +32,7 @@ void PipelineEventDispatcher::startEvent(const std::string& source, std::optiona event.metadata = metadata; } void PipelineEventDispatcher::endEvent(const std::string& source) { + checkNodeId(); auto now = std::chrono::steady_clock::now(); if(events.find(source) == events.end()) { @@ -37,9 +47,12 @@ void PipelineEventDispatcher::endEvent(const std::string& source) { PipelineEvent pipelineEvent; pipelineEvent.nodeId = nodeId; + pipelineEvent.timestamp = std::chrono::duration_cast(event.timestamp.time_since_epoch()).count(); pipelineEvent.duration = event.duration.count(); pipelineEvent.type = event.type; pipelineEvent.source = source; + pipelineEvent.sequenceNum = sequenceNum++; + pipelineEvent.setTimestampDevice(std::chrono::steady_clock::now()); if(out) { out->send(std::make_shared(pipelineEvent)); @@ -48,6 +61,7 @@ void PipelineEventDispatcher::endEvent(const std::string& source) { event.metadata = std::nullopt; } void PipelineEventDispatcher::pingEvent(const std::string& source) { + checkNodeId(); auto now = std::chrono::steady_clock::now(); if(events.find(source) == events.end()) { From b2024f053e42b3f47fc0ac46a351d7f8c88bddd3 Mon Sep 17 00:00:00 2001 From: asahtik Date: Wed, 24 Sep 2025 09:09:13 +0200 Subject: [PATCH 05/40] RVC4 FW: initial pipeline debugging impl --- cmake/Depthai/DepthaiDeviceRVC4Config.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/Depthai/DepthaiDeviceRVC4Config.cmake b/cmake/Depthai/DepthaiDeviceRVC4Config.cmake index 2dc99bffe..14c8b244a 100644 --- a/cmake/Depthai/DepthaiDeviceRVC4Config.cmake +++ b/cmake/Depthai/DepthaiDeviceRVC4Config.cmake @@ -4,4 +4,4 @@ set(DEPTHAI_DEVICE_RVC4_MATURITY "snapshot") # "version if applicable" # set(DEPTHAI_DEVICE_RVC4_VERSION "0.0.1+93f7b75a885aa32f44c5e9f53b74470c49d2b1af") -set(DEPTHAI_DEVICE_RVC4_VERSION "0.0.1+f76251a3e31c957cd21183d6170da4f53aac475b") +set(DEPTHAI_DEVICE_RVC4_VERSION "0.0.1+34d24327d3bf906cd73f6277fd10752a6e631608") From 75e93861ef847292b320e7318a558f0fbdebf2fc Mon Sep 17 00:00:00 2001 From: asahtik Date: Wed, 24 Sep 2025 14:41:03 +0200 Subject: [PATCH 06/40] Pipeline events bugfixes, pipeline state merge node --- CMakeLists.txt | 1 + include/depthai/pipeline/Pipeline.hpp | 10 ++++ .../node/internal/PipelineStateMerge.hpp | 32 +++++++++++ src/pipeline/MessageQueue.cpp | 2 +- src/pipeline/Node.cpp | 30 +++++----- src/pipeline/Pipeline.cpp | 23 +++++++- src/pipeline/datatype/DatatypeEnum.cpp | 6 ++ .../internal/PipelineEventAggregation.cpp | 4 ++ 
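
A hedged illustration of the preconditions the dispatcher enforces, based on the checks added above (in real pipelines the dispatcher is created and wired by ThreadedNode; constructing one directly here only makes the call order explicit):

    #include "depthai/utility/PipelineEventDispatcher.hpp"

    void dispatcherRules() {
        // No output queue attached: events are still built, endEvent() just skips the send.
        dai::utility::PipelineEventDispatcher d(nullptr);
        // d.startEvent("x");   // would throw: node ID not set yet
        d.setNodeId(42);
        // d.startEvent("x");   // would throw: "x" was never registered via addEvent()
        d.addEvent("x", dai::PipelineEvent::EventType::CUSTOM);
        d.startEvent("x");
        d.endEvent("x");        // produces one event whose duration is end - start
    }
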
.../node/internal/PipelineStateMerge.cpp | 55 +++++++++++++++++++ src/utility/PipelineEventDispatcher.cpp | 10 +++- 10 files changed, 153 insertions(+), 20 deletions(-) create mode 100644 include/depthai/pipeline/node/internal/PipelineStateMerge.hpp create mode 100644 src/pipeline/node/internal/PipelineStateMerge.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index 162675b4c..74bb0382c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -280,6 +280,7 @@ set(TARGET_CORE_SOURCES src/pipeline/DeviceNode.cpp src/pipeline/DeviceNodeGroup.cpp src/pipeline/node/internal/PipelineEventAggregation.cpp + src/pipeline/node/internal/PipelineStateMerge.cpp src/pipeline/node/internal/XLinkIn.cpp src/pipeline/node/internal/XLinkOut.cpp src/pipeline/node/ColorCamera.cpp diff --git a/include/depthai/pipeline/Pipeline.hpp b/include/depthai/pipeline/Pipeline.hpp index 83457a05f..0a47639f8 100644 --- a/include/depthai/pipeline/Pipeline.hpp +++ b/include/depthai/pipeline/Pipeline.hpp @@ -22,6 +22,7 @@ #include "depthai/pipeline/PipelineSchema.hpp" #include "depthai/properties/GlobalProperties.hpp" #include "depthai/utility/RecordReplay.hpp" +#include "depthai/pipeline/datatype/PipelineState.hpp" namespace dai { @@ -85,6 +86,9 @@ class PipelineImpl : public std::enable_shared_from_this { bool isHostOnly() const; bool isDeviceOnly() const; + // Pipeline state getters + std::shared_ptr getPipelineState(); + // Must be incremented and unique for each node Node::Id latestId = 0; // Pipeline asset manager @@ -115,6 +119,9 @@ class PipelineImpl : public std::enable_shared_from_this { bool removeRecordReplayFiles = true; std::string defaultDeviceId; + // Pipeline events + std::shared_ptr pipelineStateOut; + // Output queues std::vector> outputQueues; @@ -506,6 +513,9 @@ class Pipeline { /// Record and Replay void enableHolisticRecord(const RecordConfig& config); void enableHolisticReplay(const std::string& pathToRecording); + + // Pipeline state getters + std::shared_ptr getPipelineState(); }; } // namespace dai diff --git a/include/depthai/pipeline/node/internal/PipelineStateMerge.hpp b/include/depthai/pipeline/node/internal/PipelineStateMerge.hpp new file mode 100644 index 000000000..dd84f2952 --- /dev/null +++ b/include/depthai/pipeline/node/internal/PipelineStateMerge.hpp @@ -0,0 +1,32 @@ +#pragma once + +#include "depthai/pipeline/ThreadedHostNode.hpp" + +namespace dai { +namespace node { + +/** + * @brief PipelineStateMerge node. Merges PipelineState messages from device and host into a single output. 
+ */ +class PipelineStateMerge : public CustomThreadedNode { + bool hasDeviceNodes = false; + bool hasHostNodes = false; + + public: + constexpr static const char* NAME = "PipelineStateMerge"; + + Input inputDevice{*this, {"inputDevice", DEFAULT_GROUP, false, 4, {{DatatypeEnum::PipelineState, false}}}}; + Input inputHost{*this, {"inputHost", DEFAULT_GROUP, false, 4, {{DatatypeEnum::PipelineState, false}}}}; + + std::shared_ptr build(bool hasDeviceNodes, bool hasHostNodes); + + /** + * Output message of type + */ + Output out{*this, {"out", DEFAULT_GROUP, {{{DatatypeEnum::PipelineState, false}}}}}; + + void run() override; +}; + +} // namespace node +} // namespace dai diff --git a/src/pipeline/MessageQueue.cpp b/src/pipeline/MessageQueue.cpp index b8459fb46..9d2aa464e 100644 --- a/src/pipeline/MessageQueue.cpp +++ b/src/pipeline/MessageQueue.cpp @@ -24,7 +24,7 @@ MessageQueue::MessageQueue(std::string name, std::shared_ptr pipelineEventDispatcher) : queue(maxSize, blocking), name(std::move(name)), pipelineEventDispatcher(pipelineEventDispatcher) { if(pipelineEventDispatcher) { - pipelineEventDispatcher->addEvent(name, PipelineEvent::EventType::INPUT); + pipelineEventDispatcher->addEvent(this->name, PipelineEvent::EventType::INPUT); } } diff --git a/src/pipeline/Node.cpp b/src/pipeline/Node.cpp index bfc024dca..7acbd6ae8 100644 --- a/src/pipeline/Node.cpp +++ b/src/pipeline/Node.cpp @@ -318,9 +318,10 @@ Node::OutputMap::OutputMap(Node& parent, Node::OutputDescription defaultOutput, Node::Output& Node::OutputMap::operator[](const std::string& key) { if(count({name, key}) == 0) { // Create using default and rename with group and key - Output output(parent, defaultOutput, false); - output.setGroup(name); - output.setName(key); + auto desc = defaultOutput; + desc.group = name; + desc.name = key; + Output output(parent, desc, false); insert({{name, key}, output}); } // otherwise just return reference to existing @@ -329,11 +330,11 @@ Node::Output& Node::OutputMap::operator[](const std::string& key) { Node::Output& Node::OutputMap::operator[](std::pair groupKey) { if(count(groupKey) == 0) { // Create using default and rename with group and key - Output output(parent, defaultOutput, false); - + auto desc = defaultOutput; // Uses \t (tab) as a special character to parse out as subgroup name - output.setGroup(fmt::format("{}\t{}", name, groupKey.first)); - output.setName(groupKey.second); + desc.group = fmt::format("{}\t{}", name, groupKey.first); + desc.name = groupKey.second; + Output output(parent, desc, false); insert(std::make_pair(groupKey, output)); } // otherwise just return reference to existing @@ -350,9 +351,10 @@ Node::InputMap::InputMap(Node& parent, Node::InputDescription description) : Inp Node::Input& Node::InputMap::operator[](const std::string& key) { if(count({name, key}) == 0) { // Create using default and rename with group and key - Input input(parent, defaultInput, false); - input.setGroup(name); - input.setName(key); + auto desc = defaultInput; + desc.group = name; + desc.name = key; + Input input(parent, desc, false); insert({{name, key}, input}); } // otherwise just return reference to existing @@ -361,11 +363,11 @@ Node::Input& Node::InputMap::operator[](const std::string& key) { Node::Input& Node::InputMap::operator[](std::pair groupKey) { if(count(groupKey) == 0) { // Create using default and rename with group and key - Input input(parent, defaultInput, false); - + auto desc = defaultInput; // Uses \t (tab) as a special character to parse out as subgroup name - 
input.setGroup(fmt::format("{}\t{}", name, groupKey.first)); - input.setName(groupKey.second); + desc.group = fmt::format("{}\t{}", name, groupKey.first); + desc.name = groupKey.second; + Input input(parent, desc, false); insert(std::make_pair(groupKey, input)); } // otherwise just return reference to existing diff --git a/src/pipeline/Pipeline.cpp b/src/pipeline/Pipeline.cpp index f92523b42..9d05b9580 100644 --- a/src/pipeline/Pipeline.cpp +++ b/src/pipeline/Pipeline.cpp @@ -12,6 +12,7 @@ #include "depthai/utility/Initialization.hpp" #include "pipeline/datatype/ImgFrame.hpp" #include "pipeline/node/DetectionNetwork.hpp" +#include "pipeline/node/internal/PipelineStateMerge.hpp" #include "utility/Compression.hpp" #include "utility/Environment.hpp" #include "utility/ErrorMacros.hpp" @@ -478,6 +479,11 @@ bool PipelineImpl::isDeviceOnly() const { return deviceOnly; } +std::shared_ptr PipelineImpl::getPipelineState() { + auto pipelineState = pipelineStateOut->get(); + return pipelineState; +} + void PipelineImpl::add(std::shared_ptr node) { if(node == nullptr) { throw std::invalid_argument(fmt::format("Given node pointer is null")); @@ -537,7 +543,6 @@ bool PipelineImpl::isBuilt() const { void PipelineImpl::build() { // TODO(themarpe) - add mutex and set running up ahead if(isBuild) return; - isBuild = true; if(defaultDevice) { auto recordPath = std::filesystem::path(utility::getEnvAs("DEPTHAI_RECORD", "")); @@ -652,7 +657,16 @@ void PipelineImpl::build() { } } } - // TODO: link outputs of event aggregation nodes + auto stateMerge = parent.create()->build(hasDeviceNodes, hasHostNodes); + if(deviceEventAgg) { + deviceEventAgg->out.link(stateMerge->inputDevice); + } + if(hostEventAgg) { + hostEventAgg->out.link(stateMerge->inputHost); + } + pipelineStateOut = stateMerge->out.createOutputQueue(1, false); + + isBuild = true; // Go through the build stages sequentially for(const auto& node : getAllNodes()) { @@ -1065,4 +1079,9 @@ void Pipeline::enableHolisticReplay(const std::string& pathToRecording) { impl()->recordConfig.state = RecordConfig::RecordReplayState::REPLAY; impl()->enableHolisticRecordReplay = true; } + +std::shared_ptr Pipeline::getPipelineState() { + return impl()->getPipelineState(); +} + } // namespace dai diff --git a/src/pipeline/datatype/DatatypeEnum.cpp b/src/pipeline/datatype/DatatypeEnum.cpp index 32ec647eb..91bfb3231 100644 --- a/src/pipeline/datatype/DatatypeEnum.cpp +++ b/src/pipeline/datatype/DatatypeEnum.cpp @@ -36,6 +36,8 @@ const std::unordered_map> hierarchy = { DatatypeEnum::AprilTags, DatatypeEnum::BenchmarkReport, DatatypeEnum::MessageGroup, + DatatypeEnum::PipelineEvent, + DatatypeEnum::PipelineState, DatatypeEnum::PointCloudConfig, DatatypeEnum::PointCloudData, DatatypeEnum::RGBDData, @@ -73,6 +75,8 @@ const std::unordered_map> hierarchy = { DatatypeEnum::AprilTags, DatatypeEnum::BenchmarkReport, DatatypeEnum::MessageGroup, + DatatypeEnum::PipelineEvent, + DatatypeEnum::PipelineState, DatatypeEnum::PointCloudConfig, DatatypeEnum::PointCloudData, DatatypeEnum::RGBDData, @@ -108,6 +112,8 @@ const std::unordered_map> hierarchy = { {DatatypeEnum::AprilTags, {}}, {DatatypeEnum::BenchmarkReport, {}}, {DatatypeEnum::MessageGroup, {}}, + {DatatypeEnum::PipelineEvent, {}}, + {DatatypeEnum::PipelineState, {}}, {DatatypeEnum::PointCloudConfig, {}}, {DatatypeEnum::PointCloudData, {}}, {DatatypeEnum::RGBDData, {}}, diff --git a/src/pipeline/node/internal/PipelineEventAggregation.cpp b/src/pipeline/node/internal/PipelineEventAggregation.cpp index 0f1ed3aa7..019452118 100644 
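
A minimal sketch of consuming the aggregated state from application code via the getter added above (the pipeline contents are omitted; the PipelineState/NodeState fields are the ones defined in these patches):

    #include <chrono>
    #include <iostream>
    #include <thread>

    #include "depthai/pipeline/Pipeline.hpp"

    int main() {
        dai::Pipeline pipeline;
        // ... create and link the application's nodes here ...
        pipeline.start();
        for(int i = 0; i < 10; ++i) {
            std::this_thread::sleep_for(std::chrono::seconds(1));
            // Blocks until the internal merge node publishes the next aggregated state.
            auto state = pipeline.getPipelineState();
            for(const auto& [nodeId, nodeState] : state->nodeStates) {
                std::cout << "node " << nodeId << ": " << nodeState.events.size() << " recent events" << std::endl;
            }
        }
        return 0;
    }
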
--- a/src/pipeline/node/internal/PipelineEventAggregation.cpp +++ b/src/pipeline/node/internal/PipelineEventAggregation.cpp @@ -85,6 +85,7 @@ bool PipelineEventAggregation::runOnHost() const { void PipelineEventAggregation::run() { std::unordered_map nodeStates; + uint32_t sequenceNum = 0; while(isRunning()) { std::unordered_map> events; for(auto& [k, v] : inputs) { @@ -108,6 +109,9 @@ void PipelineEventAggregation::run() { nodeState.eventCount = 0; } } + outState->sequenceNum = sequenceNum++; + outState->setTimestamp(std::chrono::steady_clock::now()); + outState->tsDevice = outState->ts; if(shouldSend) out.send(outState); } } diff --git a/src/pipeline/node/internal/PipelineStateMerge.cpp b/src/pipeline/node/internal/PipelineStateMerge.cpp new file mode 100644 index 000000000..177ec3325 --- /dev/null +++ b/src/pipeline/node/internal/PipelineStateMerge.cpp @@ -0,0 +1,55 @@ +#include "depthai/pipeline/node/internal/PipelineStateMerge.hpp" + +#include "depthai/pipeline/datatype/PipelineState.hpp" + +#include "pipeline/ThreadedNodeImpl.hpp" + +namespace dai { +namespace node { + +std::shared_ptr PipelineStateMerge::build(bool hasDeviceNodes, bool hasHostNodes) { + this->hasDeviceNodes = hasDeviceNodes; + this->hasHostNodes = hasHostNodes; + return std::static_pointer_cast(shared_from_this()); +} + +void mergeStates(std::shared_ptr& outState, const std::shared_ptr& inState) { + for(const auto& [key, value] : inState->nodeStates) { + if(outState->nodeStates.find(key) != outState->nodeStates.end()) { + throw std::runtime_error("PipelineStateMerge: Duplicate node state for nodeId " + std::to_string(key)); + } else outState->nodeStates[key] = value; + } +} +void PipelineStateMerge::run() { + auto& logger = pimpl->logger; + if(!hasDeviceNodes && !hasHostNodes) { + logger->warn("PipelineStateMerge: both device and host nodes are disabled. 
Have you built the node?"); + } + uint32_t sequenceNum = 0; + while(isRunning()) { + auto outState = std::make_shared(); + if(hasDeviceNodes) { + auto deviceState = inputDevice.get(); + if(deviceState != nullptr) { + *outState = *deviceState; + } + } + if(hasHostNodes) { + auto hostState = inputHost.get(); + if(hostState != nullptr) { + if(hasDeviceNodes) { + // merge + mergeStates(outState, hostState); + auto minTimestamp = std::min(outState->getTimestamp(), hostState->getTimestamp()); + outState->setTimestamp(minTimestamp); + outState->sequenceNum = sequenceNum++; + } else { + *outState = *hostState; + } + } + } + out.send(outState); + } +} +} // namespace node +} // namespace dai diff --git a/src/utility/PipelineEventDispatcher.cpp b/src/utility/PipelineEventDispatcher.cpp index b0c59ef1d..2243f5d2a 100644 --- a/src/utility/PipelineEventDispatcher.cpp +++ b/src/utility/PipelineEventDispatcher.cpp @@ -1,5 +1,6 @@ #include "depthai/utility/PipelineEventDispatcher.hpp" #include +#include namespace dai { namespace utility { @@ -13,10 +14,12 @@ void PipelineEventDispatcher::setNodeId(int64_t id) { nodeId = id; } void PipelineEventDispatcher::addEvent(const std::string& source, PipelineEvent::EventType type) { - if(events.find(source) != events.end()) { - throw std::runtime_error("Event with name " + source + " already exists"); + if(!source.empty()) { + if(events.find(source) != events.end()) { + throw std::runtime_error("Event with name '" + source + "' already exists"); + } + events[source] = {type, {}, {}, false, std::nullopt}; } - events[source] = {type, {}, {}, false, std::nullopt}; } void PipelineEventDispatcher::startEvent(const std::string& source, std::optional metadata) { checkNodeId(); @@ -53,6 +56,7 @@ void PipelineEventDispatcher::endEvent(const std::string& source) { pipelineEvent.source = source; pipelineEvent.sequenceNum = sequenceNum++; pipelineEvent.setTimestampDevice(std::chrono::steady_clock::now()); + pipelineEvent.ts = pipelineEvent.tsDevice; if(out) { out->send(std::make_shared(pipelineEvent)); From 99ce452b408e7f19bcce8561b45868bbcd3ece1d Mon Sep 17 00:00:00 2001 From: asahtik Date: Thu, 25 Sep 2025 18:23:55 +0200 Subject: [PATCH 07/40] Some pipeline debugging bugfixes --- .../node/PipelineEventAggregationBindings.cpp | 2 +- include/depthai/pipeline/Node.hpp | 3 ++- .../node/internal/PipelineEventAggregation.hpp | 4 +++- src/pipeline/Pipeline.cpp | 18 ++++++++++++------ .../node/internal/PipelineEventAggregation.cpp | 3 +++ 5 files changed, 21 insertions(+), 9 deletions(-) diff --git a/bindings/python/src/pipeline/node/PipelineEventAggregationBindings.cpp b/bindings/python/src/pipeline/node/PipelineEventAggregationBindings.cpp index c9e675f30..64d1fd539 100644 --- a/bindings/python/src/pipeline/node/PipelineEventAggregationBindings.cpp +++ b/bindings/python/src/pipeline/node/PipelineEventAggregationBindings.cpp @@ -4,7 +4,7 @@ void bind_pipelineeventaggregation(pybind11::module& m, void* pCallstack) { using namespace dai; - using namespace dai::node; + using namespace dai::node::internal; // Node and Properties declare upfront py::class_ pipelineEventAggregationProperties( diff --git a/include/depthai/pipeline/Node.hpp b/include/depthai/pipeline/Node.hpp index aa5bdbc33..9792d6ec9 100644 --- a/include/depthai/pipeline/Node.hpp +++ b/include/depthai/pipeline/Node.hpp @@ -351,9 +351,10 @@ class Node : public std::enable_shared_from_this { public: std::vector possibleDatatypes; explicit Input(Node& par, InputDescription desc, bool ref = true) - : 
MessageQueue(std::move(desc.name), desc.queueSize, desc.blocking, par.pipelineEventDispatcher), + : MessageQueue(desc.name, desc.queueSize, desc.blocking, par.pipelineEventDispatcher), parent(par), waitForMessage(desc.waitForMessage), + group(desc.group), possibleDatatypes(std::move(desc.types)) { if(ref) { par.setInputRefs(this); diff --git a/include/depthai/pipeline/node/internal/PipelineEventAggregation.hpp b/include/depthai/pipeline/node/internal/PipelineEventAggregation.hpp index 3dbce6149..74a6a31be 100644 --- a/include/depthai/pipeline/node/internal/PipelineEventAggregation.hpp +++ b/include/depthai/pipeline/node/internal/PipelineEventAggregation.hpp @@ -7,6 +7,7 @@ namespace dai { namespace node { +namespace internal { /** * @brief Sync node. Performs syncing between image frames @@ -22,7 +23,7 @@ class PipelineEventAggregation : public DeviceNodeCRTP hostEventAgg = nullptr; - std::shared_ptr deviceEventAgg = nullptr; - if(hasHostNodes) hostEventAgg = parent.create(); - if(hasDeviceNodes) deviceEventAgg = parent.create(); + std::shared_ptr hostEventAgg = nullptr; + std::shared_ptr deviceEventAgg = nullptr; + if(hasHostNodes) { + hostEventAgg = parent.create(); + hostEventAgg->setRunOnHost(true); + } + if(hasDeviceNodes) { + deviceEventAgg = parent.create(); + deviceEventAgg->setRunOnHost(false); + } for(auto& node : getAllNodes()) { auto threadedNode = std::dynamic_pointer_cast(node); if(threadedNode) { - if(node->runOnHost() && hostEventAgg) { + if(node->runOnHost() && hostEventAgg && node->id != hostEventAgg->id) { threadedNode->pipelineEventOutput.link(hostEventAgg->inputs[fmt::format("{} - {}", node->getName(), node->id)]); - } else if(!node->runOnHost() && deviceEventAgg) { + } else if(!node->runOnHost() && deviceEventAgg && node->id != deviceEventAgg->id) { threadedNode->pipelineEventOutput.link(deviceEventAgg->inputs[fmt::format("{} - {}", node->getName(), node->id)]); } } diff --git a/src/pipeline/node/internal/PipelineEventAggregation.cpp b/src/pipeline/node/internal/PipelineEventAggregation.cpp index 019452118..95f5c104f 100644 --- a/src/pipeline/node/internal/PipelineEventAggregation.cpp +++ b/src/pipeline/node/internal/PipelineEventAggregation.cpp @@ -6,6 +6,7 @@ namespace dai { namespace node { +namespace internal { class NodeEventAggregation { int windowSize; @@ -115,5 +116,7 @@ void PipelineEventAggregation::run() { if(shouldSend) out.send(outState); } } + +} // namespace internal } // namespace node } // namespace dai From 914cbc56f2da7358f1228802bf2cc386415742ec Mon Sep 17 00:00:00 2001 From: asahtik Date: Fri, 26 Sep 2025 14:21:15 +0200 Subject: [PATCH 08/40] Fix pipeline events on host --- examples/cpp/HostNodes/CMakeLists.txt | 3 ++- examples/cpp/HostNodes/image_manip_host.cpp | 13 ++++++++----- include/depthai/pipeline/MessageQueue.hpp | 5 ++++- include/depthai/pipeline/Node.hpp | 5 +---- src/pipeline/ThreadedNode.cpp | 2 +- src/utility/PipelineEventDispatcher.cpp | 2 +- 6 files changed, 17 insertions(+), 13 deletions(-) diff --git a/examples/cpp/HostNodes/CMakeLists.txt b/examples/cpp/HostNodes/CMakeLists.txt index ba9f11135..f4bd4187f 100644 --- a/examples/cpp/HostNodes/CMakeLists.txt +++ b/examples/cpp/HostNodes/CMakeLists.txt @@ -17,6 +17,7 @@ dai_set_example_test_labels(host_node ondevice rvc2_all rvc4 ci) dai_add_example(image_manip_host image_manip_host.cpp ON OFF "${construction_vest}") dai_set_example_test_labels(image_manip_host ondevice rvc2_all rvc4 rvc4rgb ci) +target_compile_definitions(image_manip_host PRIVATE VIDEO_PATH="${construction_vest}") 
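
The per-node event linking added to PipelineImpl::build() above relies on InputMap creating a named input on first access; a short sketch of that behaviour (the aggregator pointer and key are illustrative):

    // Assuming 'agg' points at the internal PipelineEventAggregation node created during build():
    auto& in = agg->inputs["ColorCamera - 2"];     // first access creates the input; the key becomes its
                                                   // queue name and the map name "inputs" becomes its group
    auto& sameIn = agg->inputs["ColorCamera - 2"]; // later accesses return the same queue
    // Each threaded node's pipelineEventOutput is linked to one such key, formatted as
    // "<node name> - <node id>", so events remain attributable to their node inside the aggregator.
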
dai_add_example(image_manip_color_conversion_host image_manip_color_conversion.cpp ON OFF) dai_set_example_test_labels(image_manip_color_conversion_host ondevice rvc2_all rvc4 rvc4rgb ci) @@ -32,4 +33,4 @@ dai_set_example_test_labels(threaded_host_node ondevice rvc2_all rvc4 rvc4rgb ci dai_add_example(host_pipeline_synced_node host_pipeline_synced_node.cpp ON OFF) dai_set_example_test_labels(host_pipeline_synced_node ondevice rvc2_all rvc4 ci) -dai_add_example(host_only_camera host_only_camera.cpp OFF OFF) \ No newline at end of file +dai_add_example(host_only_camera host_only_camera.cpp OFF OFF) diff --git a/examples/cpp/HostNodes/image_manip_host.cpp b/examples/cpp/HostNodes/image_manip_host.cpp index ba1d5df8c..c43d75c37 100644 --- a/examples/cpp/HostNodes/image_manip_host.cpp +++ b/examples/cpp/HostNodes/image_manip_host.cpp @@ -5,11 +5,14 @@ #include "depthai/pipeline/node/host/Display.hpp" #include "depthai/pipeline/node/host/Replay.hpp" +#ifndef VIDEO_PATH +#define VIDEO_PATH "" +#endif + int main(int argc, char** argv) { - if(argc <= 1) { - std::cout << "Video parameter is missing" << std::endl; - std::cout << "Usage: ./image_manip_host video_path" << std::endl; - return -1; + std::string videoFile = VIDEO_PATH; + if(argc > 1) { + videoFile = argv[1]; } dai::Pipeline pipeline(false); @@ -27,7 +30,7 @@ int main(int argc, char** argv) { manip->initialConfig->addFlipVertical(); manip->initialConfig->setFrameType(dai::ImgFrame::Type::RGB888p); - replay->setReplayVideoFile(argv[1]); + replay->setReplayVideoFile(videoFile); replay->setOutFrameType(dai::ImgFrame::Type::NV12); replay->setFps(30); replay->setSize(1280, 720); diff --git a/include/depthai/pipeline/MessageQueue.hpp b/include/depthai/pipeline/MessageQueue.hpp index 515c5d773..5c39bf41e 100644 --- a/include/depthai/pipeline/MessageQueue.hpp +++ b/include/depthai/pipeline/MessageQueue.hpp @@ -206,7 +206,10 @@ class MessageQueue : public std::enable_shared_from_this { throw QueueException(CLOSED_QUEUE_MESSAGE); } std::shared_ptr val = nullptr; - if(!queue.tryPop(val)) return nullptr; + if(!queue.tryPop(val)) { + if(pipelineEventDispatcher) pipelineEventDispatcher->endEvent(name); + return nullptr; + } if(pipelineEventDispatcher) pipelineEventDispatcher->endEvent(name); return std::dynamic_pointer_cast(val); } diff --git a/include/depthai/pipeline/Node.hpp b/include/depthai/pipeline/Node.hpp index 9792d6ec9..c61c75498 100644 --- a/include/depthai/pipeline/Node.hpp +++ b/include/depthai/pipeline/Node.hpp @@ -351,7 +351,7 @@ class Node : public std::enable_shared_from_this { public: std::vector possibleDatatypes; explicit Input(Node& par, InputDescription desc, bool ref = true) - : MessageQueue(desc.name, desc.queueSize, desc.blocking, par.pipelineEventDispatcher), + : MessageQueue(desc.name.empty() ? 
par.createUniqueInputName() : desc.name, desc.queueSize, desc.blocking, par.pipelineEventDispatcher), parent(par), waitForMessage(desc.waitForMessage), group(desc.group), @@ -359,9 +359,6 @@ class Node : public std::enable_shared_from_this { if(ref) { par.setInputRefs(this); } - if(getName().empty()) { - setName(par.createUniqueInputName()); - } } /** diff --git a/src/pipeline/ThreadedNode.cpp b/src/pipeline/ThreadedNode.cpp index 8455ed571..246f7ecc8 100644 --- a/src/pipeline/ThreadedNode.cpp +++ b/src/pipeline/ThreadedNode.cpp @@ -27,6 +27,7 @@ void ThreadedNode::initPipelineEventDispatcher(int64_t nodeId) { } void ThreadedNode::start() { + initPipelineEventDispatcher(this->id); // A node should not be started if it is already running // We would be creating multiple threads for the same node DAI_CHECK_V(!isRunning(), "Node with id {} is already running. Cannot start it again. Node name: {}", id, getName()); @@ -36,7 +37,6 @@ void ThreadedNode::start() { running = true; thread = std::thread([this]() { try { - initPipelineEventDispatcher(this->id); run(); } catch(const MessageQueue::QueueException& ex) { // catch the exception and stop the node diff --git a/src/utility/PipelineEventDispatcher.cpp b/src/utility/PipelineEventDispatcher.cpp index 2243f5d2a..0f379e0c2 100644 --- a/src/utility/PipelineEventDispatcher.cpp +++ b/src/utility/PipelineEventDispatcher.cpp @@ -7,7 +7,7 @@ namespace utility { void PipelineEventDispatcher::checkNodeId() { if(nodeId == -1) { - throw std::runtime_error("Node ID not set for PipelineEventDispatcher"); + throw std::runtime_error("Node ID not set on PipelineEventDispatcher"); } } void PipelineEventDispatcher::setNodeId(int64_t id) { From d30b5fdc842a9c349fcb9c433f7076bef44c1c51 Mon Sep 17 00:00:00 2001 From: asahtik Date: Tue, 30 Sep 2025 09:24:52 +0200 Subject: [PATCH 09/40] Fix RVC4 pipeline events crash --- include/depthai/pipeline/Pipeline.hpp | 5 + src/pipeline/Pipeline.cpp | 216 +++++++++++++------------- 2 files changed, 116 insertions(+), 105 deletions(-) diff --git a/include/depthai/pipeline/Pipeline.hpp b/include/depthai/pipeline/Pipeline.hpp index 0a47639f8..df460980e 100644 --- a/include/depthai/pipeline/Pipeline.hpp +++ b/include/depthai/pipeline/Pipeline.hpp @@ -118,6 +118,7 @@ class PipelineImpl : public std::enable_shared_from_this { std::unordered_map recordReplayFilenames; bool removeRecordReplayFiles = true; std::string defaultDeviceId; + bool pipelineOnHost = true; // Pipeline events std::shared_ptr pipelineStateOut; @@ -484,6 +485,10 @@ class Pipeline { void build() { impl()->build(); } + void buildDevice() { + impl()->pipelineOnHost = false; + impl()->build(); + } void start() { impl()->start(); } diff --git a/src/pipeline/Pipeline.cpp b/src/pipeline/Pipeline.cpp index 4c088ef34..b002329bd 100644 --- a/src/pipeline/Pipeline.cpp +++ b/src/pipeline/Pipeline.cpp @@ -544,133 +544,139 @@ void PipelineImpl::build() { // TODO(themarpe) - add mutex and set running up ahead if(isBuild) return; - if(defaultDevice) { - auto recordPath = std::filesystem::path(utility::getEnvAs("DEPTHAI_RECORD", "")); - auto replayPath = std::filesystem::path(utility::getEnvAs("DEPTHAI_REPLAY", "")); - - if(defaultDevice->getDeviceInfo().platform == XLinkPlatform_t::X_LINK_MYRIAD_2 - || defaultDevice->getDeviceInfo().platform == XLinkPlatform_t::X_LINK_MYRIAD_X - || defaultDevice->getDeviceInfo().platform == XLinkPlatform_t::X_LINK_RVC4) { - try { + if(pipelineOnHost) { + if(defaultDevice) { + auto recordPath = 
std::filesystem::path(utility::getEnvAs("DEPTHAI_RECORD", "")); + auto replayPath = std::filesystem::path(utility::getEnvAs("DEPTHAI_REPLAY", "")); + + if(defaultDevice->getDeviceInfo().platform == XLinkPlatform_t::X_LINK_MYRIAD_2 + || defaultDevice->getDeviceInfo().platform == XLinkPlatform_t::X_LINK_MYRIAD_X + || defaultDevice->getDeviceInfo().platform == XLinkPlatform_t::X_LINK_RVC4) { + try { #ifdef DEPTHAI_MERGED_TARGET - if(enableHolisticRecordReplay) { - switch(recordConfig.state) { - case RecordConfig::RecordReplayState::RECORD: - recordPath = recordConfig.outputDir; - replayPath = ""; - break; - case RecordConfig::RecordReplayState::REPLAY: - recordPath = ""; - replayPath = recordConfig.outputDir; - break; - case RecordConfig::RecordReplayState::NONE: - enableHolisticRecordReplay = false; - break; + if(enableHolisticRecordReplay) { + switch(recordConfig.state) { + case RecordConfig::RecordReplayState::RECORD: + recordPath = recordConfig.outputDir; + replayPath = ""; + break; + case RecordConfig::RecordReplayState::REPLAY: + recordPath = ""; + replayPath = recordConfig.outputDir; + break; + case RecordConfig::RecordReplayState::NONE: + enableHolisticRecordReplay = false; + break; + } } - } - defaultDeviceId = defaultDevice->getDeviceId(); - - if(!recordPath.empty() && !replayPath.empty()) { - Logging::getInstance().logger.warn("Both DEPTHAI_RECORD and DEPTHAI_REPLAY are set. Record and replay disabled."); - } else if(!recordPath.empty()) { - if(enableHolisticRecordReplay || utility::checkRecordConfig(recordPath, recordConfig)) { - if(platform::checkWritePermissions(recordPath)) { - if(utility::setupHolisticRecord(parent, - defaultDeviceId, - recordConfig, - recordReplayFilenames, - defaultDevice->getDeviceInfo().platform == XLinkPlatform_t::X_LINK_MYRIAD_2 - || defaultDevice->getDeviceInfo().platform == XLinkPlatform_t::X_LINK_MYRIAD_X)) { - recordConfig.state = RecordConfig::RecordReplayState::RECORD; - Logging::getInstance().logger.info("Record enabled."); + defaultDeviceId = defaultDevice->getDeviceId(); + + if(!recordPath.empty() && !replayPath.empty()) { + Logging::getInstance().logger.warn("Both DEPTHAI_RECORD and DEPTHAI_REPLAY are set. Record and replay disabled."); + } else if(!recordPath.empty()) { + if(enableHolisticRecordReplay || utility::checkRecordConfig(recordPath, recordConfig)) { + if(platform::checkWritePermissions(recordPath)) { + if(utility::setupHolisticRecord(parent, + defaultDeviceId, + recordConfig, + recordReplayFilenames, + defaultDevice->getDeviceInfo().platform == XLinkPlatform_t::X_LINK_MYRIAD_2 + || defaultDevice->getDeviceInfo().platform == XLinkPlatform_t::X_LINK_MYRIAD_X)) { + recordConfig.state = RecordConfig::RecordReplayState::RECORD; + Logging::getInstance().logger.info("Record enabled."); + } else { + Logging::getInstance().logger.warn("Could not set up holistic record. Record and replay disabled."); + } } else { - Logging::getInstance().logger.warn("Could not set up holistic record. Record and replay disabled."); + Logging::getInstance().logger.warn("DEPTHAI_RECORD path does not have write permissions. Record disabled."); } } else { - Logging::getInstance().logger.warn("DEPTHAI_RECORD path does not have write permissions. Record disabled."); + Logging::getInstance().logger.warn("Could not successfully parse DEPTHAI_RECORD. Record disabled."); } - } else { - Logging::getInstance().logger.warn("Could not successfully parse DEPTHAI_RECORD. 
Record disabled."); - } - } else if(!replayPath.empty()) { - if(platform::checkPathExists(replayPath)) { - if(platform::checkWritePermissions(replayPath)) { - if(utility::setupHolisticReplay(parent, - replayPath, - defaultDeviceId, - recordConfig, - recordReplayFilenames, - defaultDevice->getDeviceInfo().platform == XLinkPlatform_t::X_LINK_MYRIAD_2 - || defaultDevice->getDeviceInfo().platform == XLinkPlatform_t::X_LINK_MYRIAD_X)) { - recordConfig.state = RecordConfig::RecordReplayState::REPLAY; - if(platform::checkPathExists(replayPath, true)) { - removeRecordReplayFiles = false; + } else if(!replayPath.empty()) { + if(platform::checkPathExists(replayPath)) { + if(platform::checkWritePermissions(replayPath)) { + if(utility::setupHolisticReplay(parent, + replayPath, + defaultDeviceId, + recordConfig, + recordReplayFilenames, + defaultDevice->getDeviceInfo().platform == XLinkPlatform_t::X_LINK_MYRIAD_2 + || defaultDevice->getDeviceInfo().platform == XLinkPlatform_t::X_LINK_MYRIAD_X)) { + recordConfig.state = RecordConfig::RecordReplayState::REPLAY; + if(platform::checkPathExists(replayPath, true)) { + removeRecordReplayFiles = false; + } + Logging::getInstance().logger.info("Replay enabled."); + } else { + Logging::getInstance().logger.warn("Could not set up holistic replay. Record and replay disabled."); } - Logging::getInstance().logger.info("Replay enabled."); } else { - Logging::getInstance().logger.warn("Could not set up holistic replay. Record and replay disabled."); + Logging::getInstance().logger.warn("DEPTHAI_REPLAY path does not have write permissions. Replay disabled."); } } else { - Logging::getInstance().logger.warn("DEPTHAI_REPLAY path does not have write permissions. Replay disabled."); + Logging::getInstance().logger.warn("DEPTHAI_REPLAY path does not exist or is invalid. Replay disabled."); } - } else { - Logging::getInstance().logger.warn("DEPTHAI_REPLAY path does not exist or is invalid. 
Replay disabled."); } - } #else - recordConfig.state = RecordConfig::RecordReplayState::NONE; - if(!recordPath.empty() || !replayPath.empty()) { - Logging::getInstance().logger.warn("Merged target is required to use holistic record/replay."); - } + recordConfig.state = RecordConfig::RecordReplayState::NONE; + if(!recordPath.empty() || !replayPath.empty()) { + Logging::getInstance().logger.warn("Merged target is required to use holistic record/replay."); + } #endif - } catch(std::runtime_error& e) { - Logging::getInstance().logger.warn("Could not set up record / replay: {}", e.what()); + } catch(std::runtime_error& e) { + Logging::getInstance().logger.warn("Could not set up record / replay: {}", e.what()); + } + } else if(enableHolisticRecordReplay || !recordPath.empty() || !replayPath.empty()) { + throw std::runtime_error("Holistic record/replay is only supported on RVC2 devices for now."); } - } else if(enableHolisticRecordReplay || !recordPath.empty() || !replayPath.empty()) { - throw std::runtime_error("Holistic record/replay is only supported on RVC2 devices for now."); } - } - // Create pipeline event aggregator node and link - // Check if any nodes are on host or device - bool hasHostNodes = false; - bool hasDeviceNodes = false; - for(const auto& node : getAllNodes()) { - if(node->runOnHost()) { - hasHostNodes = true; - } else { - hasDeviceNodes = true; + // Create pipeline event aggregator node and link + // Check if any nodes are on host or device + bool hasHostNodes = false; + bool hasDeviceNodes = false; + for(const auto& node : getAllNodes()) { + if(std::string(node->getName()) == std::string("NodeGroup") || std::string(node->getName()) == std::string("DeviceNodeGroup")) continue; + + if(node->runOnHost()) { + hasHostNodes = true; + } else { + hasDeviceNodes = true; + } } - } - std::shared_ptr hostEventAgg = nullptr; - std::shared_ptr deviceEventAgg = nullptr; - if(hasHostNodes) { - hostEventAgg = parent.create(); - hostEventAgg->setRunOnHost(true); - } - if(hasDeviceNodes) { - deviceEventAgg = parent.create(); - deviceEventAgg->setRunOnHost(false); - } - for(auto& node : getAllNodes()) { - auto threadedNode = std::dynamic_pointer_cast(node); - if(threadedNode) { - if(node->runOnHost() && hostEventAgg && node->id != hostEventAgg->id) { - threadedNode->pipelineEventOutput.link(hostEventAgg->inputs[fmt::format("{} - {}", node->getName(), node->id)]); - } else if(!node->runOnHost() && deviceEventAgg && node->id != deviceEventAgg->id) { - threadedNode->pipelineEventOutput.link(deviceEventAgg->inputs[fmt::format("{} - {}", node->getName(), node->id)]); + std::shared_ptr hostEventAgg = nullptr; + std::shared_ptr deviceEventAgg = nullptr; + if(hasHostNodes) { + hostEventAgg = parent.create(); + hostEventAgg->setRunOnHost(true); + } + if(hasDeviceNodes) { + deviceEventAgg = parent.create(); + deviceEventAgg->setRunOnHost(false); + } + for(auto& node : getAllNodes()) { + if(std::string(node->getName()) == std::string("NodeGroup") || std::string(node->getName()) == std::string("DeviceNodeGroup")) continue; + + auto threadedNode = std::dynamic_pointer_cast(node); + if(threadedNode) { + if(node->runOnHost() && hostEventAgg && node->id != hostEventAgg->id) { + threadedNode->pipelineEventOutput.link(hostEventAgg->inputs[fmt::format("{} - {}", node->getName(), node->id)]); + } else if(!node->runOnHost() && deviceEventAgg && node->id != deviceEventAgg->id) { + threadedNode->pipelineEventOutput.link(deviceEventAgg->inputs[fmt::format("{} - {}", node->getName(), node->id)]); + } } } + auto 
stateMerge = parent.create()->build(hasDeviceNodes, hasHostNodes); + if(deviceEventAgg) { + deviceEventAgg->out.link(stateMerge->inputDevice); + } + if(hostEventAgg) { + hostEventAgg->out.link(stateMerge->inputHost); + } + pipelineStateOut = stateMerge->out.createOutputQueue(1, false); } - auto stateMerge = parent.create()->build(hasDeviceNodes, hasHostNodes); - if(deviceEventAgg) { - deviceEventAgg->out.link(stateMerge->inputDevice); - } - if(hostEventAgg) { - hostEventAgg->out.link(stateMerge->inputHost); - } - pipelineStateOut = stateMerge->out.createOutputQueue(1, false); isBuild = true; From 3a579ac2f4ea0ec4cececc6d337221b3d980c762 Mon Sep 17 00:00:00 2001 From: asahtik Date: Tue, 30 Sep 2025 09:26:27 +0200 Subject: [PATCH 10/40] RVC4 FW: Fix pipeline events crash --- cmake/Depthai/DepthaiDeviceRVC4Config.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/Depthai/DepthaiDeviceRVC4Config.cmake b/cmake/Depthai/DepthaiDeviceRVC4Config.cmake index 14c8b244a..cfcb0c77f 100644 --- a/cmake/Depthai/DepthaiDeviceRVC4Config.cmake +++ b/cmake/Depthai/DepthaiDeviceRVC4Config.cmake @@ -4,4 +4,4 @@ set(DEPTHAI_DEVICE_RVC4_MATURITY "snapshot") # "version if applicable" # set(DEPTHAI_DEVICE_RVC4_VERSION "0.0.1+93f7b75a885aa32f44c5e9f53b74470c49d2b1af") -set(DEPTHAI_DEVICE_RVC4_VERSION "0.0.1+34d24327d3bf906cd73f6277fd10752a6e631608") +set(DEPTHAI_DEVICE_RVC4_VERSION "0.0.1+8e58c765badb38f3efa655c203a0429470600e5d") From 2e9b26dda8a341773e113bf4b7e6c896766f640c Mon Sep 17 00:00:00 2001 From: asahtik Date: Wed, 1 Oct 2025 16:29:37 +0200 Subject: [PATCH 11/40] Add more stats, queue state WIP --- .../datatype/PipelineEventBindings.cpp | 18 +- .../datatype/PipelineStateBindings.cpp | 42 ++- include/depthai/pipeline/Node.hpp | 2 +- .../pipeline/datatype/PipelineEvent.hpp | 18 +- .../pipeline/datatype/PipelineState.hpp | 43 ++- .../utility/PipelineEventDispatcher.hpp | 7 +- .../PipelineEventDispatcherInterface.hpp | 4 +- src/pipeline/MessageQueue.cpp | 2 +- src/pipeline/ThreadedNode.cpp | 2 +- .../internal/PipelineEventAggregation.cpp | 246 ++++++++++++++---- src/utility/PipelineEventDispatcher.cpp | 69 ++--- 11 files changed, 333 insertions(+), 120 deletions(-) diff --git a/bindings/python/src/pipeline/datatype/PipelineEventBindings.cpp b/bindings/python/src/pipeline/datatype/PipelineEventBindings.cpp index 89478d816..65f212520 100644 --- a/bindings/python/src/pipeline/datatype/PipelineEventBindings.cpp +++ b/bindings/python/src/pipeline/datatype/PipelineEventBindings.cpp @@ -17,7 +17,8 @@ void bind_pipelineevent(pybind11::module& m, void* pCallstack) { using namespace dai; py::class_, Buffer, std::shared_ptr> pipelineEvent(m, "PipelineEvent", DOC(dai, PipelineEvent)); - py::enum_ pipelineEventType(pipelineEvent, "EventType", DOC(dai, PipelineEvent, EventType)); + py::enum_ pipelineEventType(pipelineEvent, "Type", DOC(dai, PipelineEvent, Type)); + py::enum_ pipelineEventInterval(pipelineEvent, "Interval", DOC(dai, PipelineEvent, Interval)); /////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////// @@ -32,19 +33,20 @@ void bind_pipelineevent(pybind11::module& m, void* pCallstack) { /////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////// - pipelineEventType.value("CUSTOM", PipelineEvent::EventType::CUSTOM) - .value("LOOP", PipelineEvent::EventType::LOOP) - .value("INPUT", PipelineEvent::EventType::INPUT) - 
.value("OUTPUT", PipelineEvent::EventType::OUTPUT) - .value("FUNC_CALL", PipelineEvent::EventType::FUNC_CALL); + pipelineEventType.value("CUSTOM", PipelineEvent::Type::CUSTOM) + .value("LOOP", PipelineEvent::Type::LOOP) + .value("INPUT", PipelineEvent::Type::INPUT) + .value("OUTPUT", PipelineEvent::Type::OUTPUT); + pipelineEventInterval.value("NONE", PipelineEvent::Interval::NONE) + .value("START", PipelineEvent::Interval::START) + .value("END", PipelineEvent::Interval::END); // Message pipelineEvent.def(py::init<>()) .def("__repr__", &PipelineEvent::str) .def_readwrite("nodeId", &PipelineEvent::nodeId, DOC(dai, PipelineEvent, nodeId)) .def_readwrite("metadata", &PipelineEvent::metadata, DOC(dai, PipelineEvent, metadata)) - .def_readwrite("timestamp", &PipelineEvent::timestamp, DOC(dai, PipelineEvent, timestamp)) - .def_readwrite("duration", &PipelineEvent::duration, DOC(dai, PipelineEvent, duration)) + .def_readwrite("interval", &PipelineEvent::interval, DOC(dai, PipelineEvent, interval)) .def_readwrite("type", &PipelineEvent::type, DOC(dai, PipelineEvent, type)) .def_readwrite("source", &PipelineEvent::source, DOC(dai, PipelineEvent, source)) .def("getTimestamp", &PipelineEvent::Buffer::getTimestamp, DOC(dai, Buffer, getTimestamp)) diff --git a/bindings/python/src/pipeline/datatype/PipelineStateBindings.cpp b/bindings/python/src/pipeline/datatype/PipelineStateBindings.cpp index 8b59cc6ea..eff2c7bdd 100644 --- a/bindings/python/src/pipeline/datatype/PipelineStateBindings.cpp +++ b/bindings/python/src/pipeline/datatype/PipelineStateBindings.cpp @@ -17,7 +17,10 @@ void bind_pipelinestate(pybind11::module& m, void* pCallstack) { using namespace dai; py::class_ nodeState(m, "NodeState", DOC(dai, NodeState)); - py::class_ nodeStateTiming(nodeState, "Timing", DOC(dai, NodeState, Timing)); + py::class_ durationEvent(nodeState, "DurationEvent", DOC(dai, NodeState, DurationEvent)); + py::class_ nodeStateTimingStats(nodeState, "TimingStats", DOC(dai, NodeState, TimingStats)); + py::class_ nodeStateQueueStats(nodeState, "QueueStats", DOC(dai, NodeState, QueueStats)); + py::class_ nodeStateQueueState(nodeState, "QueueState", DOC(dai, NodeState, QueueState)); py::class_, Buffer, std::shared_ptr> pipelineState(m, "PipelineState", DOC(dai, PipelineState)); /////////////////////////////////////////////////////////////////////// @@ -33,16 +36,43 @@ void bind_pipelinestate(pybind11::module& m, void* pCallstack) { /////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////// - nodeStateTiming.def(py::init<>()) - .def("__repr__", &NodeState::Timing::str) - .def_readwrite("averageMicros", &NodeState::Timing::averageMicros, DOC(dai, NodeState, Timing, averageMicros)) - .def_readwrite("stdDevMicros", &NodeState::Timing::stdDevMicros, DOC(dai, NodeState, Timing, stdDevMicros)); + durationEvent.def(py::init<>()) + .def("__repr__", &NodeState::DurationEvent::str) + .def_readwrite("startEvent", &NodeState::DurationEvent::startEvent, DOC(dai, NodeState, DurationEvent, startEvent)) + .def_readwrite("durationUs", &NodeState::DurationEvent::durationUs, DOC(dai, NodeState, TimingStats, durationUs)); + + nodeStateTimingStats.def(py::init<>()) + .def("__repr__", &NodeState::TimingStats::str) + .def_readwrite("minMicros", &NodeState::TimingStats::minMicros, DOC(dai, NodeState, TimingStats, minMicros)) + .def_readwrite("maxMicros", &NodeState::TimingStats::maxMicros, DOC(dai, NodeState, TimingStats, maxMicros)) + 
.def_readwrite("averageMicrosRecent", &NodeState::TimingStats::averageMicrosRecent, DOC(dai, NodeState, TimingStats, averageMicrosRecent)) + .def_readwrite("stdDevMicrosRecent", &NodeState::TimingStats::stdDevMicrosRecent, DOC(dai, NodeState, TimingStats, stdDevMicrosRecent)) + .def_readwrite("minMicrosRecent", &NodeState::TimingStats::minMicrosRecent, DOC(dai, NodeState, TimingStats, minMicrosRecent)) + .def_readwrite("maxMicrosRecent", &NodeState::TimingStats::maxMicrosRecent, DOC(dai, NodeState, TimingStats, maxMicrosRecent)) + .def_readwrite("medianMicrosRecent", &NodeState::TimingStats::medianMicrosRecent, DOC(dai, NodeState, TimingStats, medianMicrosRecent)); + + nodeStateQueueStats.def(py::init<>()) + .def("__repr__", &NodeState::QueueStats::str) + .def_readwrite("maxQueued", &NodeState::QueueStats::maxQueued, DOC(dai, NodeState, QueueStats, maxQueued)) + .def_readwrite("minQueuedRecent", &NodeState::QueueStats::minQueuedRecent, DOC(dai, NodeState, QueueStats, minQueuedRecent)) + .def_readwrite("maxQueuedRecent", &NodeState::QueueStats::maxQueuedRecent, DOC(dai, NodeState, QueueStats, maxQueuedRecent)) + .def_readwrite("medianQueuedRecent", &NodeState::QueueStats::medianQueuedRecent, DOC(dai, NodeState, QueueStats, medianQueuedRecent)); + + nodeStateQueueState.def(py::init<>()) + .def("__repr__", &NodeState::QueueState::str) + .def_readwrite("waiting", &NodeState::QueueState::waiting, DOC(dai, NodeState, QueueState, waiting)) + .def_readwrite("numQueued", &NodeState::QueueState::numQueued, DOC(dai, NodeState, QueueState, numQueued)) + .def_readwrite("timingStats", &NodeState::QueueState::timingStats, DOC(dai, NodeState, QueueState, timingStats)) + .def_readwrite("queueStats", &NodeState::QueueState::queueStats, DOC(dai, NodeState, QueueState, queueStats)); nodeState.def(py::init<>()) .def("__repr__", &NodeState::str) .def_readwrite("events", &NodeState::events, DOC(dai, NodeState, events)) .def_readwrite("timingsByType", &NodeState::timingsByType, DOC(dai, NodeState, timingsByType)) - .def_readwrite("timingsByInstance", &NodeState::timingsByInstance, DOC(dai, NodeState, timingsByInstance)); + .def_readwrite("inputStates", &NodeState::inputStates, DOC(dai, NodeState, inputStates)) + .def_readwrite("outputStates", &NodeState::outputStates, DOC(dai, NodeState, outputStates)) + .def_readwrite("mainLoopStats", &NodeState::mainLoopStats, DOC(dai, NodeState, mainLoopStats)) + .def_readwrite("otherStats", &NodeState::otherStats, DOC(dai, NodeState, otherStats)); // Message pipelineState.def(py::init<>()) diff --git a/include/depthai/pipeline/Node.hpp b/include/depthai/pipeline/Node.hpp index c61c75498..4d858d1fc 100644 --- a/include/depthai/pipeline/Node.hpp +++ b/include/depthai/pipeline/Node.hpp @@ -146,7 +146,7 @@ class Node : public std::enable_shared_from_this { setName(par.createUniqueOutputName()); } if(pipelineEventDispatcher && getName() != "pipelineEventOutput") { - pipelineEventDispatcher->addEvent(getName(), PipelineEvent::EventType::OUTPUT); + pipelineEventDispatcher->addEvent(getName(), PipelineEvent::Type::OUTPUT); } } diff --git a/include/depthai/pipeline/datatype/PipelineEvent.hpp b/include/depthai/pipeline/datatype/PipelineEvent.hpp index 9b2b67672..0eae2cf6e 100644 --- a/include/depthai/pipeline/datatype/PipelineEvent.hpp +++ b/include/depthai/pipeline/datatype/PipelineEvent.hpp @@ -2,7 +2,6 @@ #include -#include "depthai/common/optional.hpp" #include "depthai/pipeline/datatype/Buffer.hpp" namespace dai { @@ -12,22 +11,25 @@ namespace dai { */ class PipelineEvent : public 
Buffer { public: - enum class EventType : std::int32_t { + enum class Type : std::int32_t { CUSTOM = 0, LOOP = 1, INPUT = 2, OUTPUT = 3, - FUNC_CALL = 4 + }; + enum class Interval : std::int32_t { + NONE = 0, + START = 1, + END = 2 }; PipelineEvent() = default; virtual ~PipelineEvent() = default; int64_t nodeId = -1; - std::optional metadata; - uint64_t timestamp {0}; - uint64_t duration {0}; // Duration in microseconds - EventType type = EventType::CUSTOM; + Buffer metadata; + Interval interval = Interval::NONE; + Type type = Type::CUSTOM; std::string source; void serialize(std::vector& metadata, DatatypeEnum& datatype) const override { @@ -35,7 +37,7 @@ class PipelineEvent : public Buffer { datatype = DatatypeEnum::PipelineEvent; }; - DEPTHAI_SERIALIZE(PipelineEvent, Buffer::ts, Buffer::tsDevice, Buffer::sequenceNum, nodeId, metadata, timestamp, duration, type, source); + DEPTHAI_SERIALIZE(PipelineEvent, Buffer::ts, Buffer::tsDevice, Buffer::sequenceNum, nodeId, metadata, interval, type, source); }; } // namespace dai diff --git a/include/depthai/pipeline/datatype/PipelineState.hpp b/include/depthai/pipeline/datatype/PipelineState.hpp index 159dfdbc3..ae3fc5d68 100644 --- a/include/depthai/pipeline/datatype/PipelineState.hpp +++ b/include/depthai/pipeline/datatype/PipelineState.hpp @@ -10,16 +10,43 @@ namespace dai { class NodeState { public: - struct Timing { - uint64_t averageMicros; - uint64_t stdDevMicros; - DEPTHAI_SERIALIZE(Timing, averageMicros, stdDevMicros); + struct DurationEvent { + PipelineEvent startEvent; + uint64_t durationUs; + DEPTHAI_SERIALIZE(DurationEvent, startEvent, durationUs); }; - std::vector events; - std::unordered_map timingsByType; - std::unordered_map timingsByInstance; + struct TimingStats { + uint64_t minMicros = -1; + uint64_t maxMicros; + uint64_t averageMicrosRecent; + uint64_t stdDevMicrosRecent; + uint64_t minMicrosRecent = -1; + uint64_t maxMicrosRecent; + uint64_t medianMicrosRecent; + DEPTHAI_SERIALIZE(TimingStats, minMicros, maxMicros, averageMicrosRecent, stdDevMicrosRecent, minMicrosRecent, maxMicrosRecent, medianMicrosRecent); + }; + struct QueueStats { + uint32_t maxQueued; + uint32_t minQueuedRecent; + uint32_t maxQueuedRecent; + uint32_t medianQueuedRecent; + DEPTHAI_SERIALIZE(QueueStats, maxQueued, minQueuedRecent, maxQueuedRecent, medianQueuedRecent); + }; + struct QueueState { + bool waiting; + uint32_t numQueued; + TimingStats timingStats; + QueueStats queueStats; + DEPTHAI_SERIALIZE(QueueState, waiting, numQueued, timingStats); + }; + std::vector events; + std::unordered_map timingsByType; + std::unordered_map inputStates; + std::unordered_map outputStates; + TimingStats mainLoopStats; + std::unordered_map otherStats; - DEPTHAI_SERIALIZE(NodeState, events, timingsByType, timingsByInstance); + DEPTHAI_SERIALIZE(NodeState, events, timingsByType, inputStates, outputStates, mainLoopStats, otherStats); }; /** diff --git a/include/depthai/utility/PipelineEventDispatcher.hpp b/include/depthai/utility/PipelineEventDispatcher.hpp index 78a47ab8c..26ded4550 100644 --- a/include/depthai/utility/PipelineEventDispatcher.hpp +++ b/include/depthai/utility/PipelineEventDispatcher.hpp @@ -15,11 +15,8 @@ namespace utility { class PipelineEventDispatcher : public PipelineEventDispatcherInterface { struct EventStatus { - PipelineEvent::EventType type; - std::chrono::microseconds duration; - std::chrono::time_point timestamp; + PipelineEvent event; bool ongoing; - std::optional metadata; }; int64_t nodeId = -1; @@ -36,7 +33,7 @@ class 
PipelineEventDispatcher : public PipelineEventDispatcherInterface { void setNodeId(int64_t id) override; - void addEvent(const std::string& source, PipelineEvent::EventType type) override; + void addEvent(const std::string& source, PipelineEvent::Type type) override; void startEvent(const std::string& source, std::optional metadata = std::nullopt) override; // Start event with a start and an end void endEvent(const std::string& source) override; // Stop event with a start and an end diff --git a/include/depthai/utility/PipelineEventDispatcherInterface.hpp b/include/depthai/utility/PipelineEventDispatcherInterface.hpp index 34bf526f0..332281ecb 100644 --- a/include/depthai/utility/PipelineEventDispatcherInterface.hpp +++ b/include/depthai/utility/PipelineEventDispatcherInterface.hpp @@ -1,5 +1,7 @@ #pragma once +#include + #include "depthai/pipeline/datatype/PipelineEvent.hpp" namespace dai { @@ -9,7 +11,7 @@ class PipelineEventDispatcherInterface { public: virtual ~PipelineEventDispatcherInterface() = default; virtual void setNodeId(int64_t id) = 0; - virtual void addEvent(const std::string& source, PipelineEvent::EventType type) = 0; + virtual void addEvent(const std::string& source, PipelineEvent::Type type) = 0; virtual void startEvent(const std::string& source, std::optional metadata = std::nullopt) = 0; // Start event with a start and an end virtual void endEvent(const std::string& source) = 0; // Stop event with a start and an end virtual void pingEvent(const std::string& source) = 0; // Event where stop and start are the same (eg. loop) diff --git a/src/pipeline/MessageQueue.cpp b/src/pipeline/MessageQueue.cpp index 9d2aa464e..faee735db 100644 --- a/src/pipeline/MessageQueue.cpp +++ b/src/pipeline/MessageQueue.cpp @@ -24,7 +24,7 @@ MessageQueue::MessageQueue(std::string name, std::shared_ptr pipelineEventDispatcher) : queue(maxSize, blocking), name(std::move(name)), pipelineEventDispatcher(pipelineEventDispatcher) { if(pipelineEventDispatcher) { - pipelineEventDispatcher->addEvent(this->name, PipelineEvent::EventType::INPUT); + pipelineEventDispatcher->addEvent(this->name, PipelineEvent::Type::INPUT); } } diff --git a/src/pipeline/ThreadedNode.cpp b/src/pipeline/ThreadedNode.cpp index 246f7ecc8..228cb1a24 100644 --- a/src/pipeline/ThreadedNode.cpp +++ b/src/pipeline/ThreadedNode.cpp @@ -23,7 +23,7 @@ ThreadedNode::ThreadedNode() { void ThreadedNode::initPipelineEventDispatcher(int64_t nodeId) { pipelineEventDispatcher->setNodeId(nodeId); - pipelineEventDispatcher->addEvent("_mainLoop", PipelineEvent::EventType::LOOP); + pipelineEventDispatcher->addEvent("_mainLoop", PipelineEvent::Type::LOOP); } void ThreadedNode::start() { diff --git a/src/pipeline/node/internal/PipelineEventAggregation.cpp b/src/pipeline/node/internal/PipelineEventAggregation.cpp index 95f5c104f..768f78239 100644 --- a/src/pipeline/node/internal/PipelineEventAggregation.cpp +++ b/src/pipeline/node/internal/PipelineEventAggregation.cpp @@ -3,72 +3,219 @@ #include "depthai/pipeline/datatype/PipelineEvent.hpp" #include "depthai/pipeline/datatype/PipelineState.hpp" #include "depthai/utility/CircularBuffer.hpp" +#include "pipeline/ThreadedNodeImpl.hpp" namespace dai { namespace node { namespace internal { class NodeEventAggregation { + private: + std::shared_ptr logger; + int windowSize; public: - NodeEventAggregation(int windowSize) : windowSize(windowSize), eventsBuffer(windowSize) {} + NodeEventAggregation(int windowSize, std::shared_ptr logger) : logger(logger), windowSize(windowSize), eventsBuffer(windowSize) {} 
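
The aggregation below keeps one CircularBuffer window of durations per event source; a self-contained sketch of how the recent-window fields of NodeState::TimingStats can be computed from such a window (a plain std::vector stands in for the CircularBuffer contents; the all-time min/max tracking is omitted):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <vector>

    struct WindowStats {
        uint64_t averageUs = 0, stdDevUs = 0, minUs = 0, maxUs = 0, medianUs = 0;
    };

    // Computes the "recent" statistics over one window of event durations (microseconds).
    WindowStats computeWindowStats(std::vector<uint64_t> window) {
        WindowStats s;
        if(window.empty()) return s;
        uint64_t sum = 0;
        for(auto v : window) sum += v;
        s.averageUs = sum / window.size();
        double variance = 0;
        for(auto v : window) {
            double diff = static_cast<double>(v) - static_cast<double>(s.averageUs);
            variance += diff * diff;
        }
        variance /= window.size();
        s.stdDevUs = static_cast<uint64_t>(std::sqrt(variance));
        std::sort(window.begin(), window.end());
        s.minUs = window.front();
        s.maxUs = window.back();
        s.medianUs = window[window.size() / 2];
        return s;
    }
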
NodeState state; - utility::CircularBuffer eventsBuffer; - std::unordered_map>> timingsBufferByType; - std::unordered_map>> timingsBufferByInstance; + utility::CircularBuffer eventsBuffer; + std::unordered_map>> timingsBufferByType; + std::unordered_map>> inputTimingsBuffers; + std::unordered_map>> outputTimingsBuffers; + std::unique_ptr> mainLoopTimingsBuffer; + std::unordered_map>> otherTimingsBuffers; + + std::unordered_map> ongoingInputEvents; + std::unordered_map> ongoingOutputEvents; + std::optional ongoingMainLoopEvent; + std::unordered_map> ongoingOtherEvents; uint32_t eventCount = 0; - void add(PipelineEvent& event) { - // TODO optimize avg - ++eventCount; - eventsBuffer.add(event); - state.events = eventsBuffer.getBuffer(); - if(timingsBufferByType.find(event.type) == timingsBufferByType.end()) { - timingsBufferByType[event.type] = std::make_unique>(windowSize); - } - if(timingsBufferByInstance.find(event.source) == timingsBufferByInstance.end()) { - timingsBufferByInstance[event.source] = std::make_unique>(windowSize); - } - timingsBufferByType[event.type]->add(event.duration); - timingsBufferByInstance[event.source]->add(event.duration); - // Calculate average duration and standard deviation from buffers - state.timingsByType[event.type].averageMicros = 0; - state.timingsByType[event.type].stdDevMicros = 0; - state.timingsByInstance[event.source].averageMicros = 0; - state.timingsByInstance[event.source].stdDevMicros = 0; - auto bufferByType = timingsBufferByType[event.type]->getBuffer(); - auto bufferByInstance = timingsBufferByInstance[event.source]->getBuffer(); - if(!bufferByType.empty()) { - uint64_t sum = 0; - double variance = 0; - for(auto v : bufferByType) { - sum += v; + private: + inline bool updateIntervalBuffers(PipelineEvent& event) { + using namespace std::chrono; + auto& ongoingEvent = [&]() -> std::optional& { + switch(event.type) { + case PipelineEvent::Type::LOOP: + throw std::runtime_error("LOOP event should not be an interval"); + case PipelineEvent::Type::INPUT: + return ongoingInputEvents[event.source]; + case PipelineEvent::Type::OUTPUT: + return ongoingOutputEvents[event.source]; + case PipelineEvent::Type::CUSTOM: + return ongoingOtherEvents[event.source]; + } + return ongoingMainLoopEvent; // To silence compiler warning + }(); + auto& timingsBuffer = [&]() -> std::unique_ptr>& { + switch(event.type) { + case PipelineEvent::Type::LOOP: + throw std::runtime_error("LOOP event should not be an interval"); + case PipelineEvent::Type::INPUT: + if(inputTimingsBuffers.find(event.source) == inputTimingsBuffers.end()) { + inputTimingsBuffers[event.source] = std::make_unique>(windowSize); + } + return inputTimingsBuffers[event.source]; + case PipelineEvent::Type::OUTPUT: + if(outputTimingsBuffers.find(event.source) == outputTimingsBuffers.end()) { + outputTimingsBuffers[event.source] = std::make_unique>(windowSize); + } + return outputTimingsBuffers[event.source]; + case PipelineEvent::Type::CUSTOM: + if(otherTimingsBuffers.find(event.source) == otherTimingsBuffers.end()) { + otherTimingsBuffers[event.source] = std::make_unique>(windowSize); + } + return otherTimingsBuffers[event.source]; + } + return mainLoopTimingsBuffer; // To silence compiler warning + }(); + if(ongoingEvent.has_value() && ongoingEvent->sequenceNum == event.sequenceNum && event.interval == PipelineEvent::Interval::END) { + // End event + NodeState::DurationEvent durationEvent; + durationEvent.startEvent = *ongoingEvent; + durationEvent.durationUs = duration_cast(event.getTimestamp() - 
ongoingEvent->getTimestamp()).count(); + eventsBuffer.add(durationEvent); + state.events = eventsBuffer.getBuffer(); + + if(timingsBufferByType.find(event.type) == timingsBufferByType.end()) { + timingsBufferByType[event.type] = std::make_unique>(windowSize); } - state.timingsByType[event.type].averageMicros = sum / bufferByType.size(); - // Calculate standard deviation - for(auto v : bufferByType) { - auto diff = v - state.timingsByType[event.type].averageMicros; - variance += diff * diff; + timingsBufferByType[event.type]->add(durationEvent.durationUs); + timingsBuffer->add(durationEvent.durationUs); + + ongoingEvent = std::nullopt; + + return true; + } else { + if(ongoingEvent.has_value()) { + logger->warn("Ongoing event not finished before new one started. Event source: {}, node {}", ongoingEvent->source, event.nodeId); + } + if(event.interval == PipelineEvent::Interval::START) { + // Start event + ongoingEvent = event; } - variance /= bufferByType.size(); - state.timingsByType[event.type].stdDevMicros = (uint64_t)(std::sqrt(variance)); + return false; } - if(!bufferByInstance.empty()) { - uint64_t sum = 0; - double variance = 0; - for(auto v : bufferByInstance) { - sum += v; + } + + inline bool updatePingBuffers(PipelineEvent& event) { + using namespace std::chrono; + auto& ongoingEvent = [&]() -> std::optional& { + switch(event.type) { + case PipelineEvent::Type::LOOP: + return ongoingMainLoopEvent; + case PipelineEvent::Type::CUSTOM: + return ongoingOtherEvents[event.source]; + case PipelineEvent::Type::INPUT: + case PipelineEvent::Type::OUTPUT: + throw std::runtime_error("INPUT and OUTPUT events should not be pings"); + } + return ongoingMainLoopEvent; // To silence compiler warning + }(); + auto& timingsBuffer = [&]() -> std::unique_ptr>& { + switch(event.type) { + case PipelineEvent::Type::LOOP: + return mainLoopTimingsBuffer; + case PipelineEvent::Type::CUSTOM: + if(otherTimingsBuffers.find(event.source) == otherTimingsBuffers.end()) { + otherTimingsBuffers[event.source] = std::make_unique>(windowSize); + } + return otherTimingsBuffers[event.source]; + case PipelineEvent::Type::INPUT: + case PipelineEvent::Type::OUTPUT: + throw std::runtime_error("INPUT and OUTPUT events should not be pings"); + } + return mainLoopTimingsBuffer; // To silence compiler warning + }(); + if(ongoingEvent.has_value() && ongoingEvent->sequenceNum == event.sequenceNum - 1) { + // End event + NodeState::DurationEvent durationEvent; + durationEvent.startEvent = *ongoingEvent; + durationEvent.durationUs = duration_cast(event.getTimestamp() - ongoingEvent->getTimestamp()).count(); + eventsBuffer.add(durationEvent); + state.events = eventsBuffer.getBuffer(); + + if(timingsBufferByType.find(event.type) == timingsBufferByType.end()) { + timingsBufferByType[event.type] = std::make_unique>(windowSize); + } + if(timingsBuffer == nullptr) { + timingsBuffer = std::make_unique>(windowSize); } - state.timingsByInstance[event.source].averageMicros = sum / bufferByInstance.size(); - // Calculate standard deviation - for(auto v : bufferByInstance) { - auto diff = v - state.timingsByInstance[event.source].averageMicros; - variance += diff * diff; + timingsBufferByType[event.type]->add(durationEvent.durationUs); + timingsBuffer->add(durationEvent.durationUs); + + // Start event + ongoingEvent = event; + + return true; + } else if(ongoingEvent.has_value()) { + logger->warn("Ongoing main loop event not finished before new one started. 
Event source: {}, node {}", ongoingEvent->source, event.nodeId); + } + // Start event + ongoingEvent = event; + + return false; + } + + inline void updateTimingStats(NodeState::TimingStats& stats, const utility::CircularBuffer& buffer) { + stats.minMicros = std::min(stats.minMicros, buffer.last()); + stats.maxMicros = std::max(stats.maxMicros, buffer.last()); + stats.averageMicrosRecent = 0; + stats.stdDevMicrosRecent = 0; + + auto bufferByType = buffer.getBuffer(); + uint64_t sum = 0; + double variance = 0; + for(auto v : bufferByType) { + sum += v; + } + stats.averageMicrosRecent = sum / bufferByType.size(); + // Calculate standard deviation + for(auto v : bufferByType) { + auto diff = v - stats.averageMicrosRecent; + variance += diff * diff; + } + variance /= bufferByType.size(); + stats.stdDevMicrosRecent = (uint64_t)(std::sqrt(variance)); + + std::sort(bufferByType.begin(), bufferByType.end()); + stats.minMicrosRecent = bufferByType.front(); + stats.maxMicrosRecent = bufferByType.back(); + stats.medianMicrosRecent = bufferByType[bufferByType.size() / 2]; + if(bufferByType.size() % 2 == 0) { + stats.medianMicrosRecent = (stats.medianMicrosRecent + bufferByType[bufferByType.size() / 2 - 1]) / 2; + } + } + + public: + void add(PipelineEvent& event) { + using namespace std::chrono; + ++eventCount; + bool recalculateStats = false; + if(event.interval == PipelineEvent::Interval::NONE) { + recalculateStats = updatePingBuffers(event); + } else { + recalculateStats = updateIntervalBuffers(event); + } + if(recalculateStats) { + // By type + updateTimingStats(state.timingsByType[event.type], *timingsBufferByType[event.type]); + // By instance + switch(event.type) { + case PipelineEvent::Type::CUSTOM: + updateTimingStats(state.otherStats[event.source], *otherTimingsBuffers[event.source]); + break; + case PipelineEvent::Type::LOOP: + updateTimingStats(state.mainLoopStats, *mainLoopTimingsBuffer); + break; + case PipelineEvent::Type::INPUT: + updateTimingStats(state.inputStates[event.source].timingStats, *inputTimingsBuffers[event.source]); + break; + case PipelineEvent::Type::OUTPUT: + updateTimingStats(state.outputStates[event.source].timingStats, *outputTimingsBuffers[event.source]); + break; } - variance /= bufferByInstance.size(); - state.timingsByInstance[event.source].stdDevMicros = (uint64_t)(std::sqrt(variance)); } } }; @@ -85,6 +232,7 @@ bool PipelineEventAggregation::runOnHost() const { } void PipelineEventAggregation::run() { + auto& logger = pimpl->logger; std::unordered_map nodeStates; uint32_t sequenceNum = 0; while(isRunning()) { @@ -95,7 +243,7 @@ void PipelineEventAggregation::run() { for(auto& [k, event] : events) { if(event != nullptr) { if(nodeStates.find(event->nodeId) == nodeStates.end()) { - nodeStates.insert_or_assign(event->nodeId, NodeEventAggregation(properties.aggregationWindowSize)); + nodeStates.insert_or_assign(event->nodeId, NodeEventAggregation(properties.aggregationWindowSize, logger)); } nodeStates.at(event->nodeId).add(*event); } diff --git a/src/utility/PipelineEventDispatcher.cpp b/src/utility/PipelineEventDispatcher.cpp index 0f379e0c2..c1d08479e 100644 --- a/src/utility/PipelineEventDispatcher.cpp +++ b/src/utility/PipelineEventDispatcher.cpp @@ -1,4 +1,5 @@ #include "depthai/utility/PipelineEventDispatcher.hpp" + #include #include @@ -13,12 +14,15 @@ void PipelineEventDispatcher::checkNodeId() { void PipelineEventDispatcher::setNodeId(int64_t id) { nodeId = id; } -void PipelineEventDispatcher::addEvent(const std::string& source, PipelineEvent::EventType 
type) { +void PipelineEventDispatcher::addEvent(const std::string& source, PipelineEvent::Type type) { if(!source.empty()) { if(events.find(source) != events.end()) { throw std::runtime_error("Event with name '" + source + "' already exists"); } - events[source] = {type, {}, {}, false, std::nullopt}; + PipelineEvent event; + event.type = type; + event.source = source; + events[source] = {event, false}; } } void PipelineEventDispatcher::startEvent(const std::string& source, std::optional metadata) { @@ -30,9 +34,18 @@ void PipelineEventDispatcher::startEvent(const std::string& source, std::optiona if(event.ongoing) { throw std::runtime_error("Event with name " + source + " is already ongoing"); } - event.timestamp = std::chrono::steady_clock::now(); + event.event.setTimestamp(std::chrono::steady_clock::now()); + event.event.tsDevice = event.event.ts; + event.event.sequenceNum = sequenceNum++; + event.event.nodeId = nodeId; + // TODO: event.event.metadata.emplace(metadata); + event.event.interval = PipelineEvent::Interval::START; + // type and source are already set event.ongoing = true; - event.metadata = metadata; + + if(out) { + out->send(std::make_shared(event.event)); + } } void PipelineEventDispatcher::endEvent(const std::string& source) { checkNodeId(); @@ -45,24 +58,20 @@ void PipelineEventDispatcher::endEvent(const std::string& source) { if(!event.ongoing) { throw std::runtime_error("Event with name " + source + " has not been started"); } - event.duration = std::chrono::duration_cast(now - event.timestamp); - event.ongoing = false; - PipelineEvent pipelineEvent; - pipelineEvent.nodeId = nodeId; - pipelineEvent.timestamp = std::chrono::duration_cast(event.timestamp.time_since_epoch()).count(); - pipelineEvent.duration = event.duration.count(); - pipelineEvent.type = event.type; - pipelineEvent.source = source; - pipelineEvent.sequenceNum = sequenceNum++; - pipelineEvent.setTimestampDevice(std::chrono::steady_clock::now()); - pipelineEvent.ts = pipelineEvent.tsDevice; + event.event.setTimestamp(now); + event.event.tsDevice = event.event.ts; + event.event.nodeId = nodeId; + // TODO: event.event.metadata.emplace(metadata); + event.event.interval = PipelineEvent::Interval::END; + // type and source are already set + event.ongoing = false; if(out) { - out->send(std::make_shared(pipelineEvent)); + out->send(std::make_shared(event.event)); } - event.metadata = std::nullopt; + // event.event.metadata = 0u; TODO } void PipelineEventDispatcher::pingEvent(const std::string& source) { checkNodeId(); @@ -73,22 +82,18 @@ void PipelineEventDispatcher::pingEvent(const std::string& source) { } auto& event = events[source]; if(event.ongoing) { - event.duration = std::chrono::duration_cast(now - event.timestamp); - event.timestamp = now; - - PipelineEvent pipelineEvent; - pipelineEvent.nodeId = nodeId; - pipelineEvent.duration = event.duration.count(); - pipelineEvent.type = event.type; - pipelineEvent.source = source; - pipelineEvent.metadata = std::nullopt; + throw std::runtime_error("Event with name " + source + " is already ongoing"); + } + event.event.setTimestamp(now); + event.event.tsDevice = event.event.ts; + event.event.sequenceNum = sequenceNum++; + event.event.nodeId = nodeId; + // TODO: event.event.metadata.emplace(metadata); + event.event.interval = PipelineEvent::Interval::NONE; + // type and source are already set - if(out) { - out->send(std::make_shared(pipelineEvent)); - } - } else { - event.timestamp = now; - event.ongoing = true; + if(out) { + 
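// A ping carries no START/END interval; the aggregation node pairs consecutive pings from the same source by sequence number to derive the period (e.g. main-loop iteration time).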
out->send(std::make_shared(event.event)); } } From a5e93d83bdf41d45413e8e5c2ccdc5fec5aa3180 Mon Sep 17 00:00:00 2001 From: asahtik Date: Wed, 1 Oct 2025 17:07:13 +0200 Subject: [PATCH 12/40] Event instances now have independent sequence numbers --- .../node/internal/PipelineEventAggregation.hpp | 2 +- .../node/internal/PipelineEventAggregation.cpp | 12 ++++++++++-- src/utility/PipelineEventDispatcher.cpp | 4 ++-- 3 files changed, 13 insertions(+), 5 deletions(-) diff --git a/include/depthai/pipeline/node/internal/PipelineEventAggregation.hpp b/include/depthai/pipeline/node/internal/PipelineEventAggregation.hpp index 74a6a31be..d4a454c0a 100644 --- a/include/depthai/pipeline/node/internal/PipelineEventAggregation.hpp +++ b/include/depthai/pipeline/node/internal/PipelineEventAggregation.hpp @@ -23,7 +23,7 @@ class PipelineEventAggregation : public DeviceNodeCRTPwarn("Ongoing event not finished before new one started. Event source: {}, node {}", ongoingEvent->source, event.nodeId); + logger->warn("Ongoing event (seq {}) not finished before new one (seq {}) started. Event source: {}, node {}", + ongoingEvent->sequenceNum, + event.sequenceNum, + ongoingEvent->source, + event.nodeId); } if(event.interval == PipelineEvent::Interval::START) { // Start event @@ -150,7 +154,11 @@ class NodeEventAggregation { return true; } else if(ongoingEvent.has_value()) { - logger->warn("Ongoing main loop event not finished before new one started. Event source: {}, node {}", ongoingEvent->source, event.nodeId); + logger->warn("Ongoing main loop event (seq {}) not finished before new one (seq {}) started. Event source: {}, node {}", + ongoingEvent->sequenceNum, + event.sequenceNum, + ongoingEvent->source, + event.nodeId); } // Start event ongoingEvent = event; diff --git a/src/utility/PipelineEventDispatcher.cpp b/src/utility/PipelineEventDispatcher.cpp index c1d08479e..e06fec993 100644 --- a/src/utility/PipelineEventDispatcher.cpp +++ b/src/utility/PipelineEventDispatcher.cpp @@ -36,7 +36,7 @@ void PipelineEventDispatcher::startEvent(const std::string& source, std::optiona } event.event.setTimestamp(std::chrono::steady_clock::now()); event.event.tsDevice = event.event.ts; - event.event.sequenceNum = sequenceNum++; + ++event.event.sequenceNum; event.event.nodeId = nodeId; // TODO: event.event.metadata.emplace(metadata); event.event.interval = PipelineEvent::Interval::START; @@ -86,7 +86,7 @@ void PipelineEventDispatcher::pingEvent(const std::string& source) { } event.event.setTimestamp(now); event.event.tsDevice = event.event.ts; - event.event.sequenceNum = sequenceNum++; + ++event.event.sequenceNum; event.event.nodeId = nodeId; // TODO: event.event.metadata.emplace(metadata); event.event.interval = PipelineEvent::Interval::NONE; From 38481f0ed9ca142a3f08468f3e51f07e4054e8aa Mon Sep 17 00:00:00 2001 From: asahtik Date: Wed, 1 Oct 2025 17:46:13 +0200 Subject: [PATCH 13/40] Do not update stats for every event --- .../PipelineEventAggregationProperties.hpp | 4 ++-- .../internal/PipelineEventAggregation.cpp | 22 +++++++++---------- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/include/depthai/properties/internal/PipelineEventAggregationProperties.hpp b/include/depthai/properties/internal/PipelineEventAggregationProperties.hpp index 028f0bf09..b5195a75f 100644 --- a/include/depthai/properties/internal/PipelineEventAggregationProperties.hpp +++ b/include/depthai/properties/internal/PipelineEventAggregationProperties.hpp @@ -8,8 +8,8 @@ namespace dai { * Specify properties for Sync. 
*/ struct PipelineEventAggregationProperties : PropertiesSerializable { - uint32_t aggregationWindowSize = 20; - uint32_t eventBatchSize = 10; + uint32_t aggregationWindowSize = 100; + uint32_t eventBatchSize = 50; bool sendEvents = false; }; diff --git a/src/pipeline/node/internal/PipelineEventAggregation.cpp b/src/pipeline/node/internal/PipelineEventAggregation.cpp index 035be9eaf..105f38ffe 100644 --- a/src/pipeline/node/internal/PipelineEventAggregation.cpp +++ b/src/pipeline/node/internal/PipelineEventAggregation.cpp @@ -13,10 +13,12 @@ class NodeEventAggregation { private: std::shared_ptr logger; - int windowSize; + uint32_t windowSize; + uint32_t eventBatchSize; public: - NodeEventAggregation(int windowSize, std::shared_ptr logger) : logger(logger), windowSize(windowSize), eventsBuffer(windowSize) {} + NodeEventAggregation(uint32_t windowSize, uint32_t eventBatchSize, std::shared_ptr logger) + : logger(logger), windowSize(windowSize), eventBatchSize(eventBatchSize), eventsBuffer(windowSize) {} NodeState state; utility::CircularBuffer eventsBuffer; std::unordered_map>> timingsBufferByType; @@ -30,7 +32,7 @@ class NodeEventAggregation { std::optional ongoingMainLoopEvent; std::unordered_map> ongoingOtherEvents; - uint32_t eventCount = 0; + uint32_t count = 0; private: inline bool updateIntervalBuffers(PipelineEvent& event) { @@ -199,14 +201,13 @@ class NodeEventAggregation { public: void add(PipelineEvent& event) { using namespace std::chrono; - ++eventCount; - bool recalculateStats = false; + bool addedEvent = false; if(event.interval == PipelineEvent::Interval::NONE) { - recalculateStats = updatePingBuffers(event); + addedEvent = updatePingBuffers(event); } else { - recalculateStats = updateIntervalBuffers(event); + addedEvent = updateIntervalBuffers(event); } - if(recalculateStats) { + if(addedEvent && ++count % eventBatchSize == 0) { // By type updateTimingStats(state.timingsByType[event.type], *timingsBufferByType[event.type]); // By instance @@ -251,7 +252,7 @@ void PipelineEventAggregation::run() { for(auto& [k, event] : events) { if(event != nullptr) { if(nodeStates.find(event->nodeId) == nodeStates.end()) { - nodeStates.insert_or_assign(event->nodeId, NodeEventAggregation(properties.aggregationWindowSize, logger)); + nodeStates.insert_or_assign(event->nodeId, NodeEventAggregation(properties.aggregationWindowSize, properties.eventBatchSize, logger)); } nodeStates.at(event->nodeId).add(*event); } @@ -259,11 +260,10 @@ void PipelineEventAggregation::run() { auto outState = std::make_shared(); bool shouldSend = false; for(auto& [nodeId, nodeState] : nodeStates) { - if(nodeState.eventCount >= properties.eventBatchSize) { + if(nodeState.count % properties.eventBatchSize == 0) { outState->nodeStates[nodeId] = nodeState.state; if(!properties.sendEvents) outState->nodeStates[nodeId].events.clear(); shouldSend = true; - nodeState.eventCount = 0; } } outState->sequenceNum = sequenceNum++; From 962d4afea79579a48eb30947ff1eec72afd10842 Mon Sep 17 00:00:00 2001 From: asahtik Date: Thu, 2 Oct 2025 13:02:47 +0200 Subject: [PATCH 14/40] Fix dropped pipeline events --- .../depthai/pipeline/node/internal/PipelineEventAggregation.hpp | 2 +- src/pipeline/node/internal/PipelineEventAggregation.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/include/depthai/pipeline/node/internal/PipelineEventAggregation.hpp b/include/depthai/pipeline/node/internal/PipelineEventAggregation.hpp index d4a454c0a..e16fedf23 100644 --- 
a/include/depthai/pipeline/node/internal/PipelineEventAggregation.hpp +++ b/include/depthai/pipeline/node/internal/PipelineEventAggregation.hpp @@ -23,7 +23,7 @@ class PipelineEventAggregation : public DeviceNodeCRTP> events; for(auto& [k, v] : inputs) { - events[k.second] = v.get(); + events[k.second] = v.tryGet(); } for(auto& [k, event] : events) { if(event != nullptr) { From b79ded6c18d67673c4de9da1ffa4c312c5e327dd Mon Sep 17 00:00:00 2001 From: asahtik Date: Thu, 2 Oct 2025 13:19:06 +0200 Subject: [PATCH 15/40] RVC4 FW: Fix dropped pipeline events --- cmake/Depthai/DepthaiDeviceRVC4Config.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/Depthai/DepthaiDeviceRVC4Config.cmake b/cmake/Depthai/DepthaiDeviceRVC4Config.cmake index b49f8947d..d99dca2c5 100644 --- a/cmake/Depthai/DepthaiDeviceRVC4Config.cmake +++ b/cmake/Depthai/DepthaiDeviceRVC4Config.cmake @@ -3,4 +3,4 @@ set(DEPTHAI_DEVICE_RVC4_MATURITY "snapshot") # "version if applicable" -set(DEPTHAI_DEVICE_RVC4_VERSION "0.0.1+d9ffe3ca006f85ae4d976468c1a342828739ae18") +set(DEPTHAI_DEVICE_RVC4_VERSION "0.0.1+c104182cb9ef621c71adb2ec7a203e2f03851062") From de20f54f1c03aa6d89d4780f5935690f97a5ff9e Mon Sep 17 00:00:00 2001 From: asahtik Date: Thu, 2 Oct 2025 16:39:14 +0200 Subject: [PATCH 16/40] Added queue size stats --- .../datatype/PipelineEventBindings.cpp | 1 + .../datatype/PipelineStateBindings.cpp | 2 +- .../PipelineEventDispatcherBindings.cpp | 4 +-- include/depthai/pipeline/MessageQueue.hpp | 4 +-- .../pipeline/datatype/PipelineEvent.hpp | 4 ++- .../pipeline/datatype/PipelineState.hpp | 4 +-- .../utility/PipelineEventDispatcher.hpp | 12 ++++++--- .../PipelineEventDispatcherInterface.hpp | 12 ++++++--- .../internal/PipelineEventAggregation.cpp | 25 ++++++++++++++++++- src/utility/PipelineEventDispatcher.cpp | 14 ++++++----- 10 files changed, 59 insertions(+), 23 deletions(-) diff --git a/bindings/python/src/pipeline/datatype/PipelineEventBindings.cpp b/bindings/python/src/pipeline/datatype/PipelineEventBindings.cpp index 65f212520..f2d028be6 100644 --- a/bindings/python/src/pipeline/datatype/PipelineEventBindings.cpp +++ b/bindings/python/src/pipeline/datatype/PipelineEventBindings.cpp @@ -46,6 +46,7 @@ void bind_pipelineevent(pybind11::module& m, void* pCallstack) { .def("__repr__", &PipelineEvent::str) .def_readwrite("nodeId", &PipelineEvent::nodeId, DOC(dai, PipelineEvent, nodeId)) .def_readwrite("metadata", &PipelineEvent::metadata, DOC(dai, PipelineEvent, metadata)) + .def_readwrite("queueSize", &PipelineEvent::metadata, DOC(dai, PipelineEvent, metadata)) .def_readwrite("interval", &PipelineEvent::interval, DOC(dai, PipelineEvent, interval)) .def_readwrite("type", &PipelineEvent::type, DOC(dai, PipelineEvent, type)) .def_readwrite("source", &PipelineEvent::source, DOC(dai, PipelineEvent, source)) diff --git a/bindings/python/src/pipeline/datatype/PipelineStateBindings.cpp b/bindings/python/src/pipeline/datatype/PipelineStateBindings.cpp index eff2c7bdd..67272b9e2 100644 --- a/bindings/python/src/pipeline/datatype/PipelineStateBindings.cpp +++ b/bindings/python/src/pipeline/datatype/PipelineStateBindings.cpp @@ -70,7 +70,7 @@ void bind_pipelinestate(pybind11::module& m, void* pCallstack) { .def_readwrite("events", &NodeState::events, DOC(dai, NodeState, events)) .def_readwrite("timingsByType", &NodeState::timingsByType, DOC(dai, NodeState, timingsByType)) .def_readwrite("inputStates", &NodeState::inputStates, DOC(dai, NodeState, inputStates)) - .def_readwrite("outputStates", &NodeState::outputStates, 
DOC(dai, NodeState, outputStates)) + .def_readwrite("outputStats", &NodeState::outputStats, DOC(dai, NodeState, outputStats)) .def_readwrite("mainLoopStats", &NodeState::mainLoopStats, DOC(dai, NodeState, mainLoopStats)) .def_readwrite("otherStats", &NodeState::otherStats, DOC(dai, NodeState, otherStats)); diff --git a/bindings/python/src/utility/PipelineEventDispatcherBindings.cpp b/bindings/python/src/utility/PipelineEventDispatcherBindings.cpp index 422edd72a..7ce1fd934 100644 --- a/bindings/python/src/utility/PipelineEventDispatcherBindings.cpp +++ b/bindings/python/src/utility/PipelineEventDispatcherBindings.cpp @@ -22,7 +22,7 @@ void PipelineEventDispatcherBindings::bind(pybind11::module& m, void* pCallstack .def(py::init(), py::arg("output")) .def("setNodeId", &PipelineEventDispatcher::setNodeId, py::arg("id"), DOC(dai, utility, PipelineEventDispatcher, setNodeId)) .def("addEvent", &PipelineEventDispatcher::addEvent, py::arg("source"), py::arg("type"), DOC(dai, utility, PipelineEventDispatcher, addEvent)) - .def("startEvent", &PipelineEventDispatcher::startEvent, py::arg("source"), py::arg("metadata") = std::nullopt, DOC(dai, utility, PipelineEventDispatcher, startEvent)) - .def("endEvent", &PipelineEventDispatcher::endEvent, py::arg("source"), DOC(dai, utility, PipelineEventDispatcher, endEvent)) + .def("startEvent", &PipelineEventDispatcher::startEvent, py::arg("source"), py::arg("queueSize") = std::nullopt, py::arg("metadata") = std::nullopt, DOC(dai, utility, PipelineEventDispatcher, startEvent)) + .def("endEvent", &PipelineEventDispatcher::endEvent, py::arg("source"), py::arg("queueSize") = std::nullopt, py::arg("metadata") = std::nullopt, DOC(dai, utility, PipelineEventDispatcher, endEvent)) .def("pingEvent", &PipelineEventDispatcher::pingEvent, py::arg("source"), DOC(dai, utility, PipelineEventDispatcher, pingEvent)); } diff --git a/include/depthai/pipeline/MessageQueue.hpp b/include/depthai/pipeline/MessageQueue.hpp index 5c39bf41e..7969aef1d 100644 --- a/include/depthai/pipeline/MessageQueue.hpp +++ b/include/depthai/pipeline/MessageQueue.hpp @@ -230,12 +230,12 @@ class MessageQueue : public std::enable_shared_from_this { */ template std::shared_ptr get() { - if(pipelineEventDispatcher) pipelineEventDispatcher->startEvent(name); + if(pipelineEventDispatcher) pipelineEventDispatcher->startEvent(name, getSize()); std::shared_ptr val = nullptr; if(!queue.waitAndPop(val)) { throw QueueException(CLOSED_QUEUE_MESSAGE); } - if(pipelineEventDispatcher) pipelineEventDispatcher->endEvent(name); + if(pipelineEventDispatcher) pipelineEventDispatcher->endEvent(name, getSize()); return std::dynamic_pointer_cast(val); } diff --git a/include/depthai/pipeline/datatype/PipelineEvent.hpp b/include/depthai/pipeline/datatype/PipelineEvent.hpp index 0eae2cf6e..3158123c5 100644 --- a/include/depthai/pipeline/datatype/PipelineEvent.hpp +++ b/include/depthai/pipeline/datatype/PipelineEvent.hpp @@ -2,6 +2,7 @@ #include +#include "depthai/common/optional.hpp" #include "depthai/pipeline/datatype/Buffer.hpp" namespace dai { @@ -27,7 +28,8 @@ class PipelineEvent : public Buffer { virtual ~PipelineEvent() = default; int64_t nodeId = -1; - Buffer metadata; + std::optional metadata; + std::optional queueSize; Interval interval = Interval::NONE; Type type = Type::CUSTOM; std::string source; diff --git a/include/depthai/pipeline/datatype/PipelineState.hpp b/include/depthai/pipeline/datatype/PipelineState.hpp index ae3fc5d68..325d53704 100644 --- a/include/depthai/pipeline/datatype/PipelineState.hpp +++ 
b/include/depthai/pipeline/datatype/PipelineState.hpp @@ -41,12 +41,12 @@ class NodeState { }; std::vector events; std::unordered_map timingsByType; + std::unordered_map outputStats; std::unordered_map inputStates; - std::unordered_map outputStates; TimingStats mainLoopStats; std::unordered_map otherStats; - DEPTHAI_SERIALIZE(NodeState, events, timingsByType, inputStates, outputStates, mainLoopStats, otherStats); + DEPTHAI_SERIALIZE(NodeState, events, timingsByType, outputStats, inputStates, mainLoopStats, otherStats); }; /** diff --git a/include/depthai/utility/PipelineEventDispatcher.hpp b/include/depthai/utility/PipelineEventDispatcher.hpp index 26ded4550..318c2e809 100644 --- a/include/depthai/utility/PipelineEventDispatcher.hpp +++ b/include/depthai/utility/PipelineEventDispatcher.hpp @@ -6,9 +6,9 @@ #include #include +#include "PipelineEventDispatcherInterface.hpp" #include "depthai/pipeline/Node.hpp" #include "depthai/pipeline/datatype/PipelineEvent.hpp" -#include "PipelineEventDispatcherInterface.hpp" namespace dai { namespace utility { @@ -35,9 +35,13 @@ class PipelineEventDispatcher : public PipelineEventDispatcherInterface { void addEvent(const std::string& source, PipelineEvent::Type type) override; - void startEvent(const std::string& source, std::optional metadata = std::nullopt) override; // Start event with a start and an end - void endEvent(const std::string& source) override; // Stop event with a start and an end - void pingEvent(const std::string& source) override; // Event where stop and start are the same (eg. loop) + void startEvent(const std::string& source, + std::optional queueSize = std::nullopt, + std::optional metadata = std::nullopt) override; // Start event with a start and an end + void endEvent(const std::string& source, + std::optional queueSize = std::nullopt, + std::optional metadata = std::nullopt) override; // Stop event with a start and an end + void pingEvent(const std::string& source) override; // Event where stop and start are the same (eg. loop) }; } // namespace utility diff --git a/include/depthai/utility/PipelineEventDispatcherInterface.hpp b/include/depthai/utility/PipelineEventDispatcherInterface.hpp index 332281ecb..e4f6d9858 100644 --- a/include/depthai/utility/PipelineEventDispatcherInterface.hpp +++ b/include/depthai/utility/PipelineEventDispatcherInterface.hpp @@ -8,13 +8,17 @@ namespace dai { namespace utility { class PipelineEventDispatcherInterface { -public: + public: virtual ~PipelineEventDispatcherInterface() = default; virtual void setNodeId(int64_t id) = 0; virtual void addEvent(const std::string& source, PipelineEvent::Type type) = 0; - virtual void startEvent(const std::string& source, std::optional metadata = std::nullopt) = 0; // Start event with a start and an end - virtual void endEvent(const std::string& source) = 0; // Stop event with a start and an end - virtual void pingEvent(const std::string& source) = 0; // Event where stop and start are the same (eg. loop) + virtual void startEvent(const std::string& source, + std::optional queueSize = std::nullopt, + std::optional metadata = std::nullopt) = 0; // Start event with a start and an end + virtual void endEvent(const std::string& source, + std::optional queueSize = std::nullopt, + std::optional metadata = std::nullopt) = 0; // Stop event with a start and an end + virtual void pingEvent(const std::string& source) = 0; // Event where stop and start are the same (eg. 
loop) }; } // namespace utility diff --git a/src/pipeline/node/internal/PipelineEventAggregation.cpp b/src/pipeline/node/internal/PipelineEventAggregation.cpp index 4c381b60f..bd9def4d9 100644 --- a/src/pipeline/node/internal/PipelineEventAggregation.cpp +++ b/src/pipeline/node/internal/PipelineEventAggregation.cpp @@ -27,6 +27,8 @@ class NodeEventAggregation { std::unique_ptr> mainLoopTimingsBuffer; std::unordered_map>> otherTimingsBuffers; + std::unordered_map>> inputQueueSizesBuffers; + std::unordered_map> ongoingInputEvents; std::unordered_map> ongoingOutputEvents; std::optional ongoingMainLoopEvent; @@ -201,6 +203,14 @@ class NodeEventAggregation { public: void add(PipelineEvent& event) { using namespace std::chrono; + if(event.type == PipelineEvent::Type::INPUT && event.interval == PipelineEvent::Interval::END) { + if(event.queueSize.has_value()) { + if(inputQueueSizesBuffers.find(event.source) == inputQueueSizesBuffers.end()) { + inputQueueSizesBuffers[event.source] = std::make_unique>(windowSize); + } + inputQueueSizesBuffers[event.source]->add(*event.queueSize); + } + } bool addedEvent = false; if(event.interval == PipelineEvent::Interval::NONE) { addedEvent = updatePingBuffers(event); @@ -222,10 +232,23 @@ class NodeEventAggregation { updateTimingStats(state.inputStates[event.source].timingStats, *inputTimingsBuffers[event.source]); break; case PipelineEvent::Type::OUTPUT: - updateTimingStats(state.outputStates[event.source].timingStats, *outputTimingsBuffers[event.source]); + updateTimingStats(state.outputStats[event.source], *outputTimingsBuffers[event.source]); break; } } + if(event.type == PipelineEvent::Type::INPUT && event.interval == PipelineEvent::Interval::END && ++count % eventBatchSize == 0) { + auto& qStats = state.inputStates[event.source].queueStats; + auto& qBuffer = *inputQueueSizesBuffers[event.source]; + qStats.maxQueued = std::max(qStats.maxQueued, *event.queueSize); + auto qBufferData = qBuffer.getBuffer(); + std::sort(qBufferData.begin(), qBufferData.end()); + qStats.minQueuedRecent = qBufferData.front(); + qStats.maxQueuedRecent = qBufferData.back(); + qStats.medianQueuedRecent = qBufferData[qBufferData.size() / 2]; + if(qBufferData.size() % 2 == 0) { + qStats.medianQueuedRecent = (qStats.medianQueuedRecent + qBufferData[qBufferData.size() / 2 - 1]) / 2; + } + } } }; diff --git a/src/utility/PipelineEventDispatcher.cpp b/src/utility/PipelineEventDispatcher.cpp index e06fec993..1fdb32831 100644 --- a/src/utility/PipelineEventDispatcher.cpp +++ b/src/utility/PipelineEventDispatcher.cpp @@ -25,7 +25,7 @@ void PipelineEventDispatcher::addEvent(const std::string& source, PipelineEvent: events[source] = {event, false}; } } -void PipelineEventDispatcher::startEvent(const std::string& source, std::optional metadata) { +void PipelineEventDispatcher::startEvent(const std::string& source, std::optional queueSize, std::optional metadata) { checkNodeId(); if(events.find(source) == events.end()) { throw std::runtime_error("Event with name " + source + " does not exist"); @@ -38,7 +38,8 @@ void PipelineEventDispatcher::startEvent(const std::string& source, std::optiona event.event.tsDevice = event.event.ts; ++event.event.sequenceNum; event.event.nodeId = nodeId; - // TODO: event.event.metadata.emplace(metadata); + event.event.metadata = std::move(metadata); + event.event.queueSize = std::move(queueSize); event.event.interval = PipelineEvent::Interval::START; // type and source are already set event.ongoing = true; @@ -47,7 +48,7 @@ void 
PipelineEventDispatcher::startEvent(const std::string& source, std::optiona out->send(std::make_shared(event.event)); } } -void PipelineEventDispatcher::endEvent(const std::string& source) { +void PipelineEventDispatcher::endEvent(const std::string& source, std::optional queueSize, std::optional metadata) { checkNodeId(); auto now = std::chrono::steady_clock::now(); @@ -62,7 +63,8 @@ void PipelineEventDispatcher::endEvent(const std::string& source) { event.event.setTimestamp(now); event.event.tsDevice = event.event.ts; event.event.nodeId = nodeId; - // TODO: event.event.metadata.emplace(metadata); + event.event.metadata = std::move(metadata); + event.event.queueSize = std::move(queueSize); event.event.interval = PipelineEvent::Interval::END; // type and source are already set event.ongoing = false; @@ -71,7 +73,8 @@ void PipelineEventDispatcher::endEvent(const std::string& source) { out->send(std::make_shared(event.event)); } - // event.event.metadata = 0u; TODO + event.event.metadata = std::nullopt; + event.event.queueSize = std::nullopt; } void PipelineEventDispatcher::pingEvent(const std::string& source) { checkNodeId(); @@ -88,7 +91,6 @@ void PipelineEventDispatcher::pingEvent(const std::string& source) { event.event.tsDevice = event.event.ts; ++event.event.sequenceNum; event.event.nodeId = nodeId; - // TODO: event.event.metadata.emplace(metadata); event.event.interval = PipelineEvent::Interval::NONE; // type and source are already set From 485412465eb1231ce67712ad8705c8a3b1d2a90e Mon Sep 17 00:00:00 2001 From: asahtik Date: Mon, 6 Oct 2025 11:32:18 +0200 Subject: [PATCH 17/40] WIP pipeline state api --- CMakeLists.txt | 1 + .../datatype/PipelineStateBindings.cpp | 3 +- .../python/src/pipeline/node/NodeBindings.cpp | 1 + .../node/PipelineEventAggregationBindings.cpp | 64 ++--- bindings/python/tests/messsage_queue_test.py | 4 +- examples/cpp/AprilTags/april_tags_replay.cpp | 4 +- examples/cpp/HostNodes/host_camera.cpp | 4 +- examples/cpp/HostNodes/threaded_host_node.cpp | 6 +- examples/cpp/RGBD/rgbd.cpp | 2 +- examples/cpp/RGBD/rgbd_pcl_processing.cpp | 2 +- examples/cpp/RVC2/VSLAM/rerun_node.hpp | 4 +- .../python/AprilTags/april_tags_replay.py | 2 +- examples/python/HostNodes/host_camera.py | 4 +- .../python/HostNodes/threaded_host_nodes.py | 10 +- examples/python/RGBD/rgbd.py | 2 +- examples/python/RGBD/rgbd_o3d.py | 2 +- examples/python/RGBD/rgbd_pcl_processing.py | 2 +- examples/python/RVC2/VSLAM/rerun_node.py | 4 +- .../python/Visualizer/visualizer_encoded.py | 2 +- include/depthai/pipeline/Pipeline.hpp | 218 +++++++++++++++++- include/depthai/pipeline/ThreadedNode.hpp | 4 +- .../pipeline/datatype/DatatypeEnum.hpp | 1 + .../PipelineEventAggregationConfig.hpp | 37 +++ .../pipeline/datatype/PipelineState.hpp | 9 +- .../internal/PipelineEventAggregation.hpp | 5 + .../node/internal/PipelineStateMerge.hpp | 12 +- include/depthai/utility/CircularBuffer.hpp | 10 + include/depthai/utility/ImageManipImpl.hpp | 2 +- src/basalt/BasaltVIO.cpp | 2 +- src/pipeline/InputQueue.cpp | 2 +- src/pipeline/Pipeline.cpp | 2 + src/pipeline/ThreadedNode.cpp | 8 +- src/pipeline/datatype/DatatypeEnum.cpp | 3 + .../PipelineEventAggregationConfig.cpp | 12 + src/pipeline/datatype/StreamMessageParser.cpp | 4 + src/pipeline/node/AprilTag.cpp | 2 +- src/pipeline/node/BenchmarkIn.cpp | 2 +- src/pipeline/node/DynamicCalibrationNode.cpp | 2 +- src/pipeline/node/ImageAlign.cpp | 4 +- src/pipeline/node/ImageFilters.cpp | 4 +- src/pipeline/node/ObjectTracker.cpp | 2 +- src/pipeline/node/Sync.cpp | 2 +- 
src/pipeline/node/host/Display.cpp | 2 +- src/pipeline/node/host/HostCamera.cpp | 4 +- src/pipeline/node/host/HostNode.cpp | 2 +- src/pipeline/node/host/RGBD.cpp | 2 +- src/pipeline/node/host/Record.cpp | 4 +- src/pipeline/node/host/Replay.cpp | 2 + .../internal/PipelineEventAggregation.cpp | 74 +++++- .../node/internal/PipelineStateMerge.cpp | 23 +- src/pipeline/node/internal/XLinkOutHost.cpp | 2 +- src/pipeline/node/test/MyProducer.cpp | 2 +- src/rtabmap/RTABMapSLAM.cpp | 2 +- src/rtabmap/RTABMapVIO.cpp | 2 +- src/utility/ProtoSerialize.cpp | 1 + .../input_output_naming_test.cpp | 2 +- 56 files changed, 498 insertions(+), 98 deletions(-) create mode 100644 include/depthai/pipeline/datatype/PipelineEventAggregationConfig.hpp create mode 100644 src/pipeline/datatype/PipelineEventAggregationConfig.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index c916c9be0..237f2c5f7 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -355,6 +355,7 @@ set(TARGET_CORE_SOURCES src/pipeline/datatype/PointCloudData.cpp src/pipeline/datatype/PipelineEvent.cpp src/pipeline/datatype/PipelineState.cpp + src/pipeline/datatype/PipelineEventAggregationConfig.cpp src/pipeline/datatype/RGBDData.cpp src/pipeline/datatype/MessageGroup.cpp src/pipeline/datatype/ThermalConfig.cpp diff --git a/bindings/python/src/pipeline/datatype/PipelineStateBindings.cpp b/bindings/python/src/pipeline/datatype/PipelineStateBindings.cpp index 67272b9e2..d6bc03086 100644 --- a/bindings/python/src/pipeline/datatype/PipelineStateBindings.cpp +++ b/bindings/python/src/pipeline/datatype/PipelineStateBindings.cpp @@ -39,7 +39,8 @@ void bind_pipelinestate(pybind11::module& m, void* pCallstack) { durationEvent.def(py::init<>()) .def("__repr__", &NodeState::DurationEvent::str) .def_readwrite("startEvent", &NodeState::DurationEvent::startEvent, DOC(dai, NodeState, DurationEvent, startEvent)) - .def_readwrite("durationUs", &NodeState::DurationEvent::durationUs, DOC(dai, NodeState, TimingStats, durationUs)); + .def_readwrite("durationUs", &NodeState::DurationEvent::durationUs, DOC(dai, NodeState, TimingStats, durationUs)) + .def_readwrite("fps", &NodeState::DurationEvent::fps, DOC(dai, NodeState, TimingStats, fps)); nodeStateTimingStats.def(py::init<>()) .def("__repr__", &NodeState::TimingStats::str) diff --git a/bindings/python/src/pipeline/node/NodeBindings.cpp b/bindings/python/src/pipeline/node/NodeBindings.cpp index 4237ba403..715c689d0 100644 --- a/bindings/python/src/pipeline/node/NodeBindings.cpp +++ b/bindings/python/src/pipeline/node/NodeBindings.cpp @@ -447,6 +447,7 @@ void NodeBindings::bind(pybind11::module& m, void* pCallstack) { .def("error", [](dai::ThreadedNode& node, const std::string& msg) { node.pimpl->logger->error(msg); }) .def("critical", [](dai::ThreadedNode& node, const std::string& msg) { node.pimpl->logger->critical(msg); }) .def("isRunning", &ThreadedNode::isRunning, DOC(dai, ThreadedNode, isRunning)) + .def("mainLoop", &ThreadedNode::mainLoop, DOC(dai, ThreadedNode, mainLoop)) .def("setLogLevel", &ThreadedNode::setLogLevel, DOC(dai, ThreadedNode, setLogLevel)) .def("getLogLevel", &ThreadedNode::getLogLevel, DOC(dai, ThreadedNode, getLogLevel)); } diff --git a/bindings/python/src/pipeline/node/PipelineEventAggregationBindings.cpp b/bindings/python/src/pipeline/node/PipelineEventAggregationBindings.cpp index 64d1fd539..3fcf8a616 100644 --- a/bindings/python/src/pipeline/node/PipelineEventAggregationBindings.cpp +++ b/bindings/python/src/pipeline/node/PipelineEventAggregationBindings.cpp @@ -3,36 +3,36 @@ #include 
"depthai/properties/internal/PipelineEventAggregationProperties.hpp" void bind_pipelineeventaggregation(pybind11::module& m, void* pCallstack) { - using namespace dai; - using namespace dai::node::internal; - - // Node and Properties declare upfront - py::class_ pipelineEventAggregationProperties( - m, "PipelineEventAggregationProperties", DOC(dai, PipelineEventAggregationProperties)); - auto pipelineEventAggregation = ADD_NODE(PipelineEventAggregation); - - /////////////////////////////////////////////////////////////////////// - /////////////////////////////////////////////////////////////////////// - /////////////////////////////////////////////////////////////////////// - // Call the rest of the type defines, then perform the actual bindings - Callstack* callstack = (Callstack*)pCallstack; - auto cb = callstack->top(); - callstack->pop(); - cb(m, pCallstack); - // Actual bindings - /////////////////////////////////////////////////////////////////////// - /////////////////////////////////////////////////////////////////////// - /////////////////////////////////////////////////////////////////////// - - // Properties - pipelineEventAggregationProperties.def_readwrite("aggregationWindowSize", &PipelineEventAggregationProperties::aggregationWindowSize) - .def_readwrite("eventBatchSize", &PipelineEventAggregationProperties::eventBatchSize) - .def_readwrite("sendEvents", &PipelineEventAggregationProperties::sendEvents); - - // Node - pipelineEventAggregation.def_readonly("out", &PipelineEventAggregation::out, DOC(dai, node, PipelineEventAggregation, out)) - .def_readonly("inputs", &PipelineEventAggregation::inputs, DOC(dai, node, PipelineEventAggregation, inputs)) - .def("setRunOnHost", &PipelineEventAggregation::setRunOnHost, py::arg("runOnHost"), DOC(dai, node, PipelineEventAggregation, setRunOnHost)) - .def("runOnHost", &PipelineEventAggregation::runOnHost, DOC(dai, node, PipelineEventAggregation, runOnHost)); - daiNodeModule.attr("PipelineEventAggregation").attr("Properties") = pipelineEventAggregationProperties; + // using namespace dai; + // using namespace dai::node::internal; + // + // // Node and Properties declare upfront + // py::class_ pipelineEventAggregationProperties( + // m, "PipelineEventAggregationProperties", DOC(dai, PipelineEventAggregationProperties)); + // auto pipelineEventAggregation = ADD_NODE(PipelineEventAggregation); + // + // /////////////////////////////////////////////////////////////////////// + // /////////////////////////////////////////////////////////////////////// + // /////////////////////////////////////////////////////////////////////// + // // Call the rest of the type defines, then perform the actual bindings + // Callstack* callstack = (Callstack*)pCallstack; + // auto cb = callstack->top(); + // callstack->pop(); + // cb(m, pCallstack); + // // Actual bindings + // /////////////////////////////////////////////////////////////////////// + // /////////////////////////////////////////////////////////////////////// + // /////////////////////////////////////////////////////////////////////// + // + // // Properties + // pipelineEventAggregationProperties.def_readwrite("aggregationWindowSize", &PipelineEventAggregationProperties::aggregationWindowSize) + // .def_readwrite("eventBatchSize", &PipelineEventAggregationProperties::eventBatchSize) + // .def_readwrite("sendEvents", &PipelineEventAggregationProperties::sendEvents); + // + // // Node + // pipelineEventAggregation.def_readonly("out", &PipelineEventAggregation::out, DOC(dai, node, 
PipelineEventAggregation, out)) + // .def_readonly("inputs", &PipelineEventAggregation::inputs, DOC(dai, node, PipelineEventAggregation, inputs)) + // .def("setRunOnHost", &PipelineEventAggregation::setRunOnHost, py::arg("runOnHost"), DOC(dai, node, PipelineEventAggregation, setRunOnHost)) + // .def("runOnHost", &PipelineEventAggregation::runOnHost, DOC(dai, node, PipelineEventAggregation, runOnHost)); + // daiNodeModule.attr("PipelineEventAggregation").attr("Properties") = pipelineEventAggregationProperties; } diff --git a/bindings/python/tests/messsage_queue_test.py b/bindings/python/tests/messsage_queue_test.py index 120c87b2e..0334ca52f 100644 --- a/bindings/python/tests/messsage_queue_test.py +++ b/bindings/python/tests/messsage_queue_test.py @@ -387,7 +387,7 @@ def __init__(self, name: str): self.output = self.createOutput() def run(self): - while self.isRunning(): + while self.mainLoop(): buffer = self.input.get() self.output.send(buffer) @@ -399,7 +399,7 @@ def __init__(self, name: str): self.output = self.createOutput() def run(self): - while self.isRunning(): + while self.mainLoop(): buffer = dai.Buffer() self.output.send(buffer) time.sleep(0.001) diff --git a/examples/cpp/AprilTags/april_tags_replay.cpp b/examples/cpp/AprilTags/april_tags_replay.cpp index c03958fa4..71779178d 100644 --- a/examples/cpp/AprilTags/april_tags_replay.cpp +++ b/examples/cpp/AprilTags/april_tags_replay.cpp @@ -27,7 +27,7 @@ class ImageReplay : public dai::NodeCRTP { return; } - while(isRunning()) { + while(mainLoop()) { // Read the frame from the camera cv::Mat frame; if(!cap.read(frame)) { @@ -96,4 +96,4 @@ int main() { pipeline.wait(); return 0; -} \ No newline at end of file +} diff --git a/examples/cpp/HostNodes/threaded_host_node.cpp b/examples/cpp/HostNodes/threaded_host_node.cpp index 90b95560c..60c02c008 100644 --- a/examples/cpp/HostNodes/threaded_host_node.cpp +++ b/examples/cpp/HostNodes/threaded_host_node.cpp @@ -10,7 +10,7 @@ class TestPassthrough : public dai::node::CustomThreadedNode { Output output = dai::Node::Output{*this, {}}; void run() override { - while(isRunning()) { + while(mainLoop()) { auto buffer = input.get(); if(buffer) { std::cout << "The passthrough node received a buffer!" << std::endl; @@ -25,7 +25,7 @@ class TestSink : public dai::node::CustomThreadedNode { Input input = dai::Node::Input{*this, {}}; void run() override { - while(isRunning()) { + while(mainLoop()) { auto buffer = input.get(); if(buffer) { std::cout << "The sink node received a buffer!" << std::endl; @@ -39,7 +39,7 @@ class TestSource : public dai::node::CustomThreadedNode { Output output = dai::Node::Output{*this, {}}; void run() override { - while(isRunning()) { + while(mainLoop()) { auto buffer = std::make_shared(); std::cout << "The source node is sending a buffer!" 
<< std::endl; output.send(buffer); diff --git a/examples/cpp/RGBD/rgbd.cpp b/examples/cpp/RGBD/rgbd.cpp index 59369862a..d4f73f273 100644 --- a/examples/cpp/RGBD/rgbd.cpp +++ b/examples/cpp/RGBD/rgbd.cpp @@ -15,7 +15,7 @@ class RerunNode : public dai::NodeCRTP { const auto rec = rerun::RecordingStream("rerun"); rec.spawn().exit_on_failure(); rec.log_static("world", rerun::ViewCoordinates::RDF); - while(isRunning()) { + while(mainLoop()) { auto pclIn = inputPCL.get(); auto rgbdIn = inputRGBD.get(); if(pclIn != nullptr) { diff --git a/examples/cpp/RGBD/rgbd_pcl_processing.cpp b/examples/cpp/RGBD/rgbd_pcl_processing.cpp index 54c75e6a0..41b6636de 100644 --- a/examples/cpp/RGBD/rgbd_pcl_processing.cpp +++ b/examples/cpp/RGBD/rgbd_pcl_processing.cpp @@ -16,7 +16,7 @@ class CustomPCLProcessingNode : public dai::NodeCRTP(); auto pclOut = std::make_shared(); if(pclIn != nullptr) { diff --git a/examples/cpp/RVC2/VSLAM/rerun_node.hpp b/examples/cpp/RVC2/VSLAM/rerun_node.hpp index dcea0fd91..0c7fd1a41 100644 --- a/examples/cpp/RVC2/VSLAM/rerun_node.hpp +++ b/examples/cpp/RVC2/VSLAM/rerun_node.hpp @@ -41,7 +41,7 @@ class RerunNode : public dai::NodeCRTP { rec.spawn().exit_on_failure(); rec.log_static("world", rerun::ViewCoordinates::FLU); rec.log("world/ground", rerun::Boxes3D::from_half_sizes({{3.f, 3.f, 0.00001f}})); - while(isRunning()) { + while(mainLoop()) { std::shared_ptr transData = inputTrans.get(); auto imgFrame = inputImg.get(); if(!intrinsicsSet) { @@ -107,4 +107,4 @@ class RerunNode : public dai::NodeCRTP { float fx = 400.0; float fy = 400.0; bool intrinsicsSet = false; -}; \ No newline at end of file +}; diff --git a/examples/python/AprilTags/april_tags_replay.py b/examples/python/AprilTags/april_tags_replay.py index 01cae83a9..fe6dec7f2 100644 --- a/examples/python/AprilTags/april_tags_replay.py +++ b/examples/python/AprilTags/april_tags_replay.py @@ -24,7 +24,7 @@ def __init__(self): imgFrame.setType(dai.ImgFrame.Type.GRAY8) self.imgFrame = imgFrame def run(self): - while self.isRunning(): + while self.mainLoop(): self.output.send(self.imgFrame) time.sleep(0.03) diff --git a/examples/python/HostNodes/host_camera.py b/examples/python/HostNodes/host_camera.py index fd458b84b..83dd78089 100644 --- a/examples/python/HostNodes/host_camera.py +++ b/examples/python/HostNodes/host_camera.py @@ -13,7 +13,7 @@ def run(self): if not cap.isOpened(): p.stop() raise RuntimeError("Error: Couldn't open host camera") - while self.isRunning(): + while self.mainLoop(): # Read the frame from the camera ret, frame = cap.read() if not ret: @@ -40,4 +40,4 @@ def run(self): key = cv2.waitKey(1) if key == ord('q'): p.stop() - break \ No newline at end of file + break diff --git a/examples/python/HostNodes/threaded_host_nodes.py b/examples/python/HostNodes/threaded_host_nodes.py index 7b3c50827..4118fd788 100644 --- a/examples/python/HostNodes/threaded_host_nodes.py +++ b/examples/python/HostNodes/threaded_host_nodes.py @@ -31,7 +31,7 @@ def onStop(self): print("Goodbye from", self.name) def run(self): - while self.isRunning(): + while self.mainLoop(): buffer = self.input.get() print("The passthrough node received a buffer!") self.output.send(buffer) @@ -47,7 +47,7 @@ def onStart(self): print("Hello, this is", self.name) def run(self): - while self.isRunning(): + while self.mainLoop(): buffer = self.input.get() del buffer print(f"{self.name} node received a buffer!") @@ -59,7 +59,7 @@ def __init__(self, name: str): self.output = self.createOutput() def run(self): - while self.isRunning(): + while 
self.mainLoop(): buffer = dai.Buffer() print(f"{self.name} node is sending a buffer!") self.output.send(buffer) @@ -77,7 +77,7 @@ def __init__(self, name: str): self.passthrough1.output.link(self.passthrough2.input) def run(self): - while self.isRunning(): + while self.mainLoop(): buffer = self.input.get() self.output.send(buffer) @@ -105,4 +105,4 @@ def run(self): p.start() while p.isRunning(): time.sleep(1) - print("Pipeline is running...") \ No newline at end of file + print("Pipeline is running...") diff --git a/examples/python/RGBD/rgbd.py b/examples/python/RGBD/rgbd.py index 4f2202097..5e15d796d 100644 --- a/examples/python/RGBD/rgbd.py +++ b/examples/python/RGBD/rgbd.py @@ -21,7 +21,7 @@ def run(self): rr.init("", spawn=True) rr.log("world", rr.ViewCoordinates.RDF) rr.log("world/ground", rr.Boxes3D(half_sizes=[3.0, 3.0, 0.00001])) - while self.isRunning(): + while self.mainLoop(): try: inPointCloud = self.inputPCL.get() except dai.MessageQueue.QueueException: diff --git a/examples/python/RGBD/rgbd_o3d.py b/examples/python/RGBD/rgbd_o3d.py index 0f7d2787f..5ba514015 100644 --- a/examples/python/RGBD/rgbd_o3d.py +++ b/examples/python/RGBD/rgbd_o3d.py @@ -33,7 +33,7 @@ def key_callback(vis, action, mods): ) vis.add_geometry(coordinateFrame) first = True - while self.isRunning(): + while self.mainLoop(): try: inPointCloud = self.inputPCL.tryGet() except dai.MessageQueue.QueueException: diff --git a/examples/python/RGBD/rgbd_pcl_processing.py b/examples/python/RGBD/rgbd_pcl_processing.py index 804d83e77..0b1930bac 100644 --- a/examples/python/RGBD/rgbd_pcl_processing.py +++ b/examples/python/RGBD/rgbd_pcl_processing.py @@ -11,7 +11,7 @@ def __init__(self): self.thresholdDistance = 3000.0 def run(self): - while self.isRunning(): + while self.mainLoop(): try: inPointCloud = self.inputPCL.get() except dai.MessageQueue.QueueException: diff --git a/examples/python/RVC2/VSLAM/rerun_node.py b/examples/python/RVC2/VSLAM/rerun_node.py index 4211b7ef1..caefda601 100644 --- a/examples/python/RVC2/VSLAM/rerun_node.py +++ b/examples/python/RVC2/VSLAM/rerun_node.py @@ -36,7 +36,7 @@ def run(self): rr.init("", spawn=True) rr.log("world", rr.ViewCoordinates.FLU) rr.log("world/ground", rr.Boxes3D(half_sizes=[3.0, 3.0, 0.00001])) - while self.isRunning(): + while self.mainLoop(): transData = self.inputTrans.get() imgFrame = self.inputImg.get() if not self.intrinsicsSet: @@ -63,4 +63,4 @@ def run(self): points, colors = pclGrndData.getPointsRGB() rr.log("world/ground_pcl", rr.Points3D(points, colors=colors, radii=[0.01])) if mapData is not None: - rr.log("map", rr.Image(mapData.getCvFrame())) \ No newline at end of file + rr.log("map", rr.Image(mapData.getCvFrame())) diff --git a/examples/python/Visualizer/visualizer_encoded.py b/examples/python/Visualizer/visualizer_encoded.py index 9c6ab195a..78d0eca6f 100644 --- a/examples/python/Visualizer/visualizer_encoded.py +++ b/examples/python/Visualizer/visualizer_encoded.py @@ -20,7 +20,7 @@ def __init__(self): def setLabelMap(self, labelMap): self.labelMap = labelMap def run(self): - while self.isRunning(): + while self.mainLoop(): nnData = self.inputDet.get() detections = nnData.detections imgAnnt = dai.ImgAnnotations() diff --git a/include/depthai/pipeline/Pipeline.hpp b/include/depthai/pipeline/Pipeline.hpp index df460980e..2f686ec36 100644 --- a/include/depthai/pipeline/Pipeline.hpp +++ b/include/depthai/pipeline/Pipeline.hpp @@ -15,14 +15,15 @@ #include "depthai/device/CalibrationHandler.hpp" #include "depthai/device/Device.hpp" #include 
"depthai/openvino/OpenVINO.hpp" +#include "depthai/pipeline/datatype/PipelineEventAggregationConfig.hpp" #include "depthai/utility/AtomicBool.hpp" // shared #include "depthai/device/BoardConfig.hpp" #include "depthai/pipeline/PipelineSchema.hpp" +#include "depthai/pipeline/datatype/PipelineState.hpp" #include "depthai/properties/GlobalProperties.hpp" #include "depthai/utility/RecordReplay.hpp" -#include "depthai/pipeline/datatype/PipelineState.hpp" namespace dai { @@ -242,6 +243,221 @@ class PipelineImpl : public std::enable_shared_from_this { std::vector loadResourceCwd(fs::path uri, fs::path cwd, bool moveAsset = false); }; +/** + * pipeline.getState().nodes({nodeId1}).summary() -> std::unordered_map; + * pipeline.getState().nodes({nodeId1}).detailed() -> std::unordered_map; + * pipeline.getState().nodes(nodeId1).detailed() -> NodeState; + * pipeline.getState().nodes({nodeId1}).outputs() -> std::unordered_map; + * pipeline.getState().nodes({nodeId1}).outputs({outputName1}) -> std::unordered_map; + * pipeline.getState().nodes({nodeId1}).outputs(outputName) -> TimingStats; + * pipeline.getState().nodes({nodeId1}).events(); + * pipeline.getState().nodes({nodeId1}).inputs() -> std::unordered_map; + * pipeline.getState().nodes({nodeId1}).inputs({inputName1}) -> std::unordered_map; + * pipeline.getState().nodes({nodeId1}).inputs(inputName) -> QueueState; + * pipeline.getState().nodes({nodeId1}).otherStats() -> std::unordered_map; + * pipeline.getState().nodes({nodeId1}).otherStats({statName1}) -> std::unordered_map; + * pipeline.getState().nodes({nodeId1}).outputs(statName) -> TimingStats; + */ +template +class NodeStateApi { + static_assert(false); +}; +template <> +class NodeStateApi> { + std::vector nodeIds; + + public: + explicit NodeStateApi(std::vector nodeIds) : nodeIds(std::move(nodeIds)) {} + std::unordered_map> summary() { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + for(auto id : nodeIds) { + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = id; + nodeCfg.summary = true; + cfg.nodes.push_back(nodeCfg); + } + + // TODO send and get + return {}; + } + PipelineState detailed() { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + for(auto id : nodeIds) { + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = id; + nodeCfg.summary = true; // contains main loop timing + nodeCfg.inputs = {}; // send all + nodeCfg.outputs = {}; // send all + nodeCfg.others = {}; // send all + cfg.nodes.push_back(nodeCfg); + } + + // TODO send and get + return {}; + } + std::unordered_map> outputs() { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + for(auto id : nodeIds) { + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = id; + nodeCfg.outputs = {}; // send all + cfg.nodes.push_back(nodeCfg); + } + + // TODO send and get + return {}; + } + std::unordered_map> inputs() { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + for(auto id : nodeIds) { + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = id; + nodeCfg.inputs = {}; // send all + cfg.nodes.push_back(nodeCfg); + } + + // TODO send and get + return {}; + } + std::unordered_map> otherStats() { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + for(auto id : nodeIds) { + NodeEventAggregationConfig nodeCfg; + 
nodeCfg.nodeId = id; + nodeCfg.others = {}; // send all + cfg.nodes.push_back(nodeCfg); + } + + // TODO send and get + return {}; + } +}; +template <> +class NodeStateApi { + Node::Id nodeId; + + public: + explicit NodeStateApi(Node::Id nodeId) : nodeId(nodeId) {} + std::unordered_map summary() { + return NodeStateApi>({nodeId}).summary()[nodeId]; + } + NodeState detailed() { + return NodeStateApi>({nodeId}).detailed().nodeStates[nodeId]; + } + std::unordered_map outputs() { + return NodeStateApi>({nodeId}).outputs()[nodeId]; + } + std::unordered_map inputs() { + return NodeStateApi>({nodeId}).inputs()[nodeId]; + } + std::unordered_map otherStats() { + return NodeStateApi>({nodeId}).otherStats()[nodeId]; + } + std::unordered_map outputs(const std::vector& outputNames) { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = nodeId; + nodeCfg.outputs = outputNames; + cfg.nodes.push_back(nodeCfg); + + // TODO send and get + return {}; + } + NodeState::TimingStats outputs(const std::string& outputName) { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = nodeId; + nodeCfg.outputs = {outputName}; + cfg.nodes.push_back(nodeCfg); + + // TODO send and get + return {}; + } + std::unordered_map> events() { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = nodeId; + nodeCfg.events = true; + cfg.nodes.push_back(nodeCfg); + + // TODO send and get + return {}; + } + std::unordered_map inputs(const std::vector& inputNames) { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = nodeId; + nodeCfg.inputs = inputNames; + cfg.nodes.push_back(nodeCfg); + + // TODO send and get + return {}; + } + NodeState::QueueState inputs(const std::string& inputName) { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = nodeId; + nodeCfg.inputs = {inputName}; + cfg.nodes.push_back(nodeCfg); + + // TODO send and get + return {}; + } + std::unordered_map otherStats(const std::vector& statNames) { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = nodeId; + nodeCfg.others = statNames; + cfg.nodes.push_back(nodeCfg); + + // TODO send and get + return {}; + } + NodeState::TimingStats otherStats(const std::string& statName) { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = nodeId; + nodeCfg.others = {statName}; + cfg.nodes.push_back(nodeCfg); + + // TODO send and get + return {}; + } +}; +class PipelineStateApi { + public: + NodeStateApi> nodes(const std::vector& nodeIds) { + return NodeStateApi>(nodeIds); + } + NodeStateApi nodes(Node::Id nodeId) { + return NodeStateApi(nodeId); + } +}; + /** * @brief Represents the pipeline, set of nodes and connections between them */ diff --git a/include/depthai/pipeline/ThreadedNode.hpp b/include/depthai/pipeline/ThreadedNode.hpp index 9ef4d836a..84475b1fc 100644 --- 
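/*
 * Context for the ThreadedNode change that follows: isRunning() becomes a plain
 * const check of the running flag, while the new mainLoop() first pings the
 * "_mainLoop" pipeline event and then returns isRunning(). That ping is what the
 * event-aggregation node uses to derive per-node loop timing, which is why every
 * node loop in this series switches from while(isRunning()) to while(mainLoop()).
 * Illustrative custom node (not part of the patch):
 *
 *     // inside a dai::node::CustomThreadedNode subclass
 *     void run() override {
 *         while(mainLoop()) {                // pings "_mainLoop", then checks the running flag
 *             auto msg = input.get();        // queue reads are themselves reported as INPUT events
 *             if(msg != nullptr) output.send(msg);
 *         }
 *     }
 */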
a/include/depthai/pipeline/ThreadedNode.hpp +++ b/include/depthai/pipeline/ThreadedNode.hpp @@ -51,7 +51,9 @@ class ThreadedNode : public Node { virtual void run() = 0; // check if still running - bool isRunning(); + bool isRunning() const; + + bool mainLoop(); /** * @brief Sets the logging severity level for this node. diff --git a/include/depthai/pipeline/datatype/DatatypeEnum.hpp b/include/depthai/pipeline/datatype/DatatypeEnum.hpp index a501ad7f4..7be2fa4f7 100644 --- a/include/depthai/pipeline/datatype/DatatypeEnum.hpp +++ b/include/depthai/pipeline/datatype/DatatypeEnum.hpp @@ -45,6 +45,7 @@ enum class DatatypeEnum : std::int32_t { CoverageData, PipelineEvent, PipelineState, + PipelineEventAggregationConfig, }; bool isDatatypeSubclassOf(DatatypeEnum parent, DatatypeEnum children); diff --git a/include/depthai/pipeline/datatype/PipelineEventAggregationConfig.hpp b/include/depthai/pipeline/datatype/PipelineEventAggregationConfig.hpp new file mode 100644 index 000000000..afa804b0c --- /dev/null +++ b/include/depthai/pipeline/datatype/PipelineEventAggregationConfig.hpp @@ -0,0 +1,37 @@ +#pragma once +#include +#include + +#include "depthai/common/optional.hpp" +#include "depthai/pipeline/datatype/Buffer.hpp" +#include "depthai/pipeline/datatype/DatatypeEnum.hpp" + +namespace dai { + +class NodeEventAggregationConfig { + public: + int64_t nodeId = -1; + std::optional> inputs; + std::optional> outputs; + std::optional> others; + bool summary = false; + bool events = false; + + DEPTHAI_SERIALIZE(NodeEventAggregationConfig, nodeId, inputs, outputs, others, summary, events); +}; + +/// PipelineEventAggregationConfig configuration structure +class PipelineEventAggregationConfig : public Buffer { + public: + std::vector nodes; + bool repeat = false; // Keep sending the aggregated state without waiting for new config + + PipelineEventAggregationConfig() = default; + virtual ~PipelineEventAggregationConfig(); + + void serialize(std::vector& metadata, DatatypeEnum& datatype) const override; + + DEPTHAI_SERIALIZE(PipelineEventAggregationConfig, nodes, repeat); +}; + +} // namespace dai diff --git a/include/depthai/pipeline/datatype/PipelineState.hpp b/include/depthai/pipeline/datatype/PipelineState.hpp index 325d53704..d6665c292 100644 --- a/include/depthai/pipeline/datatype/PipelineState.hpp +++ b/include/depthai/pipeline/datatype/PipelineState.hpp @@ -13,7 +13,8 @@ class NodeState { struct DurationEvent { PipelineEvent startEvent; uint64_t durationUs; - DEPTHAI_SERIALIZE(DurationEvent, startEvent, durationUs); + float fps; + DEPTHAI_SERIALIZE(DurationEvent, startEvent, durationUs, fps); }; struct TimingStats { uint64_t minMicros = -1; @@ -23,7 +24,8 @@ class NodeState { uint64_t minMicrosRecent = -1; uint64_t maxMicrosRecent; uint64_t medianMicrosRecent; - DEPTHAI_SERIALIZE(TimingStats, minMicros, maxMicros, averageMicrosRecent, stdDevMicrosRecent, minMicrosRecent, maxMicrosRecent, medianMicrosRecent); + float fps; + DEPTHAI_SERIALIZE(TimingStats, minMicros, maxMicros, averageMicrosRecent, stdDevMicrosRecent, minMicrosRecent, maxMicrosRecent, medianMicrosRecent, fps); }; struct QueueStats { uint32_t maxQueued; @@ -58,13 +60,14 @@ class PipelineState : public Buffer { virtual ~PipelineState() = default; std::unordered_map nodeStates; + uint32_t configSequenceNum = 0; void serialize(std::vector& metadata, DatatypeEnum& datatype) const override { metadata = utility::serialize(*this); datatype = DatatypeEnum::PipelineState; }; - DEPTHAI_SERIALIZE(PipelineState, Buffer::ts, Buffer::tsDevice, 
Buffer::sequenceNum, nodeStates); + DEPTHAI_SERIALIZE(PipelineState, Buffer::ts, Buffer::tsDevice, Buffer::sequenceNum, nodeStates, configSequenceNum); }; } // namespace dai diff --git a/include/depthai/pipeline/node/internal/PipelineEventAggregation.hpp b/include/depthai/pipeline/node/internal/PipelineEventAggregation.hpp index e16fedf23..2ca556841 100644 --- a/include/depthai/pipeline/node/internal/PipelineEventAggregation.hpp +++ b/include/depthai/pipeline/node/internal/PipelineEventAggregation.hpp @@ -25,6 +25,11 @@ class PipelineEventAggregation : public DeviceNodeCRTP { Input inputDevice{*this, {"inputDevice", DEFAULT_GROUP, false, 4, {{DatatypeEnum::PipelineState, false}}}}; Input inputHost{*this, {"inputHost", DEFAULT_GROUP, false, 4, {{DatatypeEnum::PipelineState, false}}}}; - std::shared_ptr build(bool hasDeviceNodes, bool hasHostNodes); + /** + * Input PipelineEventAggregationConfig message with state request parameters + */ + Input request{*this, {"request", DEFAULT_GROUP, DEFAULT_BLOCKING, DEFAULT_QUEUE_SIZE, {{{DatatypeEnum::PipelineEventAggregationConfig, false}}}, false}}; + + /** + * Output PipelineEventAggregationConfig message with state request parameters + */ + Output outRequest{*this, {"outRequest", DEFAULT_GROUP, {{{DatatypeEnum::PipelineEventAggregationConfig, false}}}}}; /** * Output message of type */ Output out{*this, {"out", DEFAULT_GROUP, {{{DatatypeEnum::PipelineState, false}}}}}; + std::shared_ptr build(bool hasDeviceNodes, bool hasHostNodes); + void run() override; }; diff --git a/include/depthai/utility/CircularBuffer.hpp b/include/depthai/utility/CircularBuffer.hpp index 6e386eb19..c4cf3c181 100644 --- a/include/depthai/utility/CircularBuffer.hpp +++ b/include/depthai/utility/CircularBuffer.hpp @@ -29,6 +29,16 @@ class CircularBuffer { } return result; } + T first() const { + if(buffer.empty()) { + throw std::runtime_error("CircularBuffer is empty"); + } + if(buffer.size() < maxSize) { + return buffer.front(); + } else { + return buffer[index]; + } + } T last() const { if(buffer.empty()) { throw std::runtime_error("CircularBuffer is empty"); diff --git a/include/depthai/utility/ImageManipImpl.hpp b/include/depthai/utility/ImageManipImpl.hpp index 505e0e09c..d3c8dea9e 100644 --- a/include/depthai/utility/ImageManipImpl.hpp +++ b/include/depthai/utility/ImageManipImpl.hpp @@ -59,7 +59,7 @@ void loop(N& node, std::shared_ptr inImage; - while(node.isRunning()) { + while(node.mainLoop()) { std::shared_ptr pConfig; bool hasConfig = false; bool needsImage = true; diff --git a/src/basalt/BasaltVIO.cpp b/src/basalt/BasaltVIO.cpp index ac2b30c8e..1eda40d08 100644 --- a/src/basalt/BasaltVIO.cpp +++ b/src/basalt/BasaltVIO.cpp @@ -53,7 +53,7 @@ void BasaltVIO::run() { Eigen::Quaterniond q(R); basalt::PoseState::SE3 opticalTransform(q, Eigen::Vector3d(0, 0, 0)); - while(isRunning()) { + while(mainLoop()) { if(!initialized) continue; pimpl->outStateQueue->pop(data); diff --git a/src/pipeline/InputQueue.cpp b/src/pipeline/InputQueue.cpp index 818f418d6..70526c9b1 100644 --- a/src/pipeline/InputQueue.cpp +++ b/src/pipeline/InputQueue.cpp @@ -14,7 +14,7 @@ InputQueue::InputQueueNode::InputQueueNode(unsigned int maxSize, bool blocking) } void InputQueue::InputQueueNode::run() { - while(isRunning()) { + while(mainLoop()) { output.send(input.get()); } } diff --git a/src/pipeline/Pipeline.cpp b/src/pipeline/Pipeline.cpp index b002329bd..1ee73c20a 100644 --- a/src/pipeline/Pipeline.cpp +++ b/src/pipeline/Pipeline.cpp @@ -671,9 +671,11 @@ void PipelineImpl::build() { auto 
stateMerge = parent.create()->build(hasDeviceNodes, hasHostNodes); if(deviceEventAgg) { deviceEventAgg->out.link(stateMerge->inputDevice); + stateMerge->outRequest.link(deviceEventAgg->request); } if(hostEventAgg) { hostEventAgg->out.link(stateMerge->inputHost); + stateMerge->outRequest.link(hostEventAgg->request); } pipelineStateOut = stateMerge->out.createOutputQueue(1, false); } diff --git a/src/pipeline/ThreadedNode.cpp b/src/pipeline/ThreadedNode.cpp index 3c5e79b38..242f97db4 100644 --- a/src/pipeline/ThreadedNode.cpp +++ b/src/pipeline/ThreadedNode.cpp @@ -89,9 +89,13 @@ dai::LogLevel ThreadedNode::getLogLevel() const { return spdlogLevelToLogLevel(pimpl->logger->level(), LogLevel::WARN); } -bool ThreadedNode::isRunning() { - this->pipelineEventDispatcher->pingEvent("_mainLoop"); +bool ThreadedNode::isRunning() const { return running; } +bool ThreadedNode::mainLoop() { + this->pipelineEventDispatcher->pingEvent("_mainLoop"); + return isRunning(); +} + } // namespace dai diff --git a/src/pipeline/datatype/DatatypeEnum.cpp b/src/pipeline/datatype/DatatypeEnum.cpp index 91bfb3231..2d3a3e8ff 100644 --- a/src/pipeline/datatype/DatatypeEnum.cpp +++ b/src/pipeline/datatype/DatatypeEnum.cpp @@ -38,6 +38,7 @@ const std::unordered_map> hierarchy = { DatatypeEnum::MessageGroup, DatatypeEnum::PipelineEvent, DatatypeEnum::PipelineState, + DatatypeEnum::PipelineEventAggregationConfig, DatatypeEnum::PointCloudConfig, DatatypeEnum::PointCloudData, DatatypeEnum::RGBDData, @@ -77,6 +78,7 @@ const std::unordered_map> hierarchy = { DatatypeEnum::MessageGroup, DatatypeEnum::PipelineEvent, DatatypeEnum::PipelineState, + DatatypeEnum::PipelineEventAggregationConfig, DatatypeEnum::PointCloudConfig, DatatypeEnum::PointCloudData, DatatypeEnum::RGBDData, @@ -114,6 +116,7 @@ const std::unordered_map> hierarchy = { {DatatypeEnum::MessageGroup, {}}, {DatatypeEnum::PipelineEvent, {}}, {DatatypeEnum::PipelineState, {}}, + {DatatypeEnum::PipelineEventAggregationConfig, {}}, {DatatypeEnum::PointCloudConfig, {}}, {DatatypeEnum::PointCloudData, {}}, {DatatypeEnum::RGBDData, {}}, diff --git a/src/pipeline/datatype/PipelineEventAggregationConfig.cpp b/src/pipeline/datatype/PipelineEventAggregationConfig.cpp new file mode 100644 index 000000000..8ef38e1ba --- /dev/null +++ b/src/pipeline/datatype/PipelineEventAggregationConfig.cpp @@ -0,0 +1,12 @@ +#include "depthai/pipeline/datatype/PipelineEventAggregationConfig.hpp" + +namespace dai { + +PipelineEventAggregationConfig::~PipelineEventAggregationConfig() = default; + +void PipelineEventAggregationConfig::serialize(std::vector& metadata, DatatypeEnum& datatype) const { + metadata = utility::serialize(*this); + datatype = DatatypeEnum::PipelineEventAggregationConfig; +} + +} // namespace dai diff --git a/src/pipeline/datatype/StreamMessageParser.cpp b/src/pipeline/datatype/StreamMessageParser.cpp index 7ec39151d..f25884b02 100644 --- a/src/pipeline/datatype/StreamMessageParser.cpp +++ b/src/pipeline/datatype/StreamMessageParser.cpp @@ -16,6 +16,7 @@ #include "depthai/pipeline/datatype/Buffer.hpp" #include "depthai/pipeline/datatype/CameraControl.hpp" #include "depthai/pipeline/datatype/PipelineEvent.hpp" +#include "depthai/pipeline/datatype/PipelineEventAggregationConfig.hpp" #include "depthai/pipeline/datatype/PipelineState.hpp" #ifdef DEPTHAI_HAVE_DYNAMIC_CALIBRATION_SUPPORT #include "depthai/pipeline/datatype/DynamicCalibrationControl.hpp" @@ -246,6 +247,9 @@ std::shared_ptr StreamMessageParser::parseMessage(streamPacketDesc_t* case DatatypeEnum::PipelineState: return 
parseDatatype(metadataStart, serializedObjectSize, data, fd); break; + case DatatypeEnum::PipelineEventAggregationConfig: + return parseDatatype(metadataStart, serializedObjectSize, data, fd); + break; case DatatypeEnum::MessageGroup: return parseDatatype(metadataStart, serializedObjectSize, data, fd); break; diff --git a/src/pipeline/node/AprilTag.cpp b/src/pipeline/node/AprilTag.cpp index 7fe371e2a..765af8d42 100644 --- a/src/pipeline/node/AprilTag.cpp +++ b/src/pipeline/node/AprilTag.cpp @@ -206,7 +206,7 @@ void AprilTag::run() { // Handle possible errors during configuration handleErrors(errno); - while(isRunning()) { + while(mainLoop()) { // Retrieve config from user if available if(properties.inputConfigSync) { inConfig = inputConfig.get(); diff --git a/src/pipeline/node/BenchmarkIn.cpp b/src/pipeline/node/BenchmarkIn.cpp index 539f748a4..634a0153d 100644 --- a/src/pipeline/node/BenchmarkIn.cpp +++ b/src/pipeline/node/BenchmarkIn.cpp @@ -55,7 +55,7 @@ void BenchmarkIn::run() { auto start = steady_clock::now(); - while(isRunning()) { + while(mainLoop()) { auto inMessage = input.get(); // If this is the first message of the batch, reset counters diff --git a/src/pipeline/node/DynamicCalibrationNode.cpp b/src/pipeline/node/DynamicCalibrationNode.cpp index cd32d0f91..c9da5c484 100644 --- a/src/pipeline/node/DynamicCalibrationNode.cpp +++ b/src/pipeline/node/DynamicCalibrationNode.cpp @@ -557,7 +557,7 @@ void DynamicCalibration::run() { auto previousLoadingTimeFloat = std::chrono::steady_clock::now() + std::chrono::duration(calibrationPeriod); auto previousLoadingTime = std::chrono::time_point_cast(previousLoadingTimeFloat); initializePipeline(device); - while(isRunning()) { + while(mainLoop()) { slept = false; doWork(previousLoadingTime); if(!slept) { diff --git a/src/pipeline/node/ImageAlign.cpp b/src/pipeline/node/ImageAlign.cpp index 794001de8..d1da2a960 100644 --- a/src/pipeline/node/ImageAlign.cpp +++ b/src/pipeline/node/ImageAlign.cpp @@ -353,7 +353,7 @@ void ImageAlign::run() { ImgFrame inputAlignToImgFrame; uint32_t currentEepromId = getParentPipeline().getEepromId(); - while(isRunning()) { + while(mainLoop()) { auto inputImg = input.get(); if(!initialized) { @@ -585,4 +585,4 @@ void ImageAlign::run() { #endif // DEPTHAI_HAVE_OPENCV_SUPPORT } // namespace node -} // namespace dai \ No newline at end of file +} // namespace dai diff --git a/src/pipeline/node/ImageFilters.cpp b/src/pipeline/node/ImageFilters.cpp index 5c5757201..b474d6284 100644 --- a/src/pipeline/node/ImageFilters.cpp +++ b/src/pipeline/node/ImageFilters.cpp @@ -818,7 +818,7 @@ void ImageFilters::run() { filters.size() == 0, getFilterPipelineString()); - while(isRunning()) { + while(mainLoop()) { // Set config while(inputConfig.has()) { auto configMsg = inputConfig.get(); @@ -946,7 +946,7 @@ void ToFDepthConfidenceFilter::applyDepthConfidenceFilter(std::shared_ptr(); diff --git a/src/pipeline/node/ObjectTracker.cpp b/src/pipeline/node/ObjectTracker.cpp index 1756c40e7..fd2ee507f 100644 --- a/src/pipeline/node/ObjectTracker.cpp +++ b/src/pipeline/node/ObjectTracker.cpp @@ -98,7 +98,7 @@ void ObjectTracker::run() { impl::OCSTracker tracker(properties); - while(isRunning()) { + while(mainLoop()) { std::shared_ptr inputTrackerImg; std::shared_ptr inputDetectionImg; std::shared_ptr inputImgDetections; diff --git a/src/pipeline/node/Sync.cpp b/src/pipeline/node/Sync.cpp index 5d0048969..c22f82af1 100644 --- a/src/pipeline/node/Sync.cpp +++ b/src/pipeline/node/Sync.cpp @@ -50,7 +50,7 @@ void Sync::run() { auto 
syncThresholdNs = properties.syncThresholdNs; logger->trace("Sync threshold: {}", syncThresholdNs); - while(isRunning()) { + while(mainLoop()) { auto tAbsoluteBeginning = steady_clock::now(); std::unordered_map> inputFrames; for(auto name : inputNames) { diff --git a/src/pipeline/node/host/Display.cpp b/src/pipeline/node/host/Display.cpp index 257a4fc8f..1e7488a88 100644 --- a/src/pipeline/node/host/Display.cpp +++ b/src/pipeline/node/host/Display.cpp @@ -37,7 +37,7 @@ Display::Display(std::string name) : name(std::move(name)) {} void Display::run() { auto fpsCounter = FPSCounter(); - while(isRunning()) { + while(mainLoop()) { std::shared_ptr imgFrame = input.get(); if(imgFrame != nullptr) { fpsCounter.update(); diff --git a/src/pipeline/node/host/HostCamera.cpp b/src/pipeline/node/host/HostCamera.cpp index 7b5cfac4c..f698f9d26 100644 --- a/src/pipeline/node/host/HostCamera.cpp +++ b/src/pipeline/node/host/HostCamera.cpp @@ -13,7 +13,7 @@ void HostCamera::run() { throw std::runtime_error("Couldn't open camera"); } int64_t seqNum = 0; - while(isRunning()) { + while(mainLoop()) { cv::Mat frame; auto success = cap.read(frame); if(frame.empty() || !success) { @@ -36,4 +36,4 @@ void HostCamera::run() { } } // namespace node -} // namespace dai \ No newline at end of file +} // namespace dai diff --git a/src/pipeline/node/host/HostNode.cpp b/src/pipeline/node/host/HostNode.cpp index 885a3a533..e5d6522b3 100644 --- a/src/pipeline/node/host/HostNode.cpp +++ b/src/pipeline/node/host/HostNode.cpp @@ -17,7 +17,7 @@ void HostNode::buildStage1() { } void HostNode::run() { - while(isRunning()) { + while(mainLoop()) { // Get input auto in = input.get(); // Create a lambda that captures the class as a shared pointer and the message diff --git a/src/pipeline/node/host/RGBD.cpp b/src/pipeline/node/host/RGBD.cpp index 92fd67d84..5f964b1b9 100644 --- a/src/pipeline/node/host/RGBD.cpp +++ b/src/pipeline/node/host/RGBD.cpp @@ -363,7 +363,7 @@ void RGBD::initialize(std::shared_ptr frames) { } void RGBD::run() { - while(isRunning()) { + while(mainLoop()) { if(!pcl.getQueueConnections().empty() || !pcl.getConnections().empty() || !rgbd.getQueueConnections().empty() || !rgbd.getConnections().empty()) { // Get the color and depth frames auto group = inSync.get(); diff --git a/src/pipeline/node/host/Record.cpp b/src/pipeline/node/host/Record.cpp index 4604d2bf7..36f6316d5 100644 --- a/src/pipeline/node/host/Record.cpp +++ b/src/pipeline/node/host/Record.cpp @@ -53,7 +53,7 @@ void RecordVideo::run() { unsigned int i = 0; auto start = std::chrono::steady_clock::now(); auto end = std::chrono::steady_clock::now(); - while(isRunning()) { + while(mainLoop()) { auto msg = input.get(); if(msg == nullptr) continue; if(streamType == DatatypeEnum::ADatatype) { @@ -150,7 +150,7 @@ void RecordMetadataOnly::run() { utility::ByteRecorder byteRecorder; DatatypeEnum streamType = DatatypeEnum::ADatatype; - while(isRunning()) { + while(mainLoop()) { auto msg = input.get(); if(msg == nullptr) continue; if(streamType == DatatypeEnum::ADatatype) { diff --git a/src/pipeline/node/host/Replay.cpp b/src/pipeline/node/host/Replay.cpp index 70e00d7fc..d995179c6 100644 --- a/src/pipeline/node/host/Replay.cpp +++ b/src/pipeline/node/host/Replay.cpp @@ -99,6 +99,7 @@ inline std::shared_ptr getMessage(const std::shared_ptr getProtoMessage(utility::ByteP case DatatypeEnum::CoverageData: case DatatypeEnum::PipelineEvent: case DatatypeEnum::PipelineState: + case DatatypeEnum::PipelineEventAggregationConfig: throw std::runtime_error("Cannot replay 
message type: " + std::to_string((int)datatype)); } return {}; diff --git a/src/pipeline/node/internal/PipelineEventAggregation.cpp b/src/pipeline/node/internal/PipelineEventAggregation.cpp index bd9def4d9..670c24083 100644 --- a/src/pipeline/node/internal/PipelineEventAggregation.cpp +++ b/src/pipeline/node/internal/PipelineEventAggregation.cpp @@ -1,5 +1,7 @@ #include "depthai/pipeline/node/internal/PipelineEventAggregation.hpp" +#include + #include "depthai/pipeline/datatype/PipelineEvent.hpp" #include "depthai/pipeline/datatype/PipelineState.hpp" #include "depthai/utility/CircularBuffer.hpp" @@ -11,6 +13,11 @@ namespace internal { class NodeEventAggregation { private: + struct FpsMeasurement { + std::chrono::steady_clock::time_point time; + int64_t sequenceNum; + }; + std::shared_ptr logger; uint32_t windowSize; @@ -29,6 +36,10 @@ class NodeEventAggregation { std::unordered_map>> inputQueueSizesBuffers; + std::unordered_map>> inputFpsBuffers; + std::unordered_map>> outputFpsBuffers; + std::unordered_map>> otherFpsBuffers; + std::unordered_map> ongoingInputEvents; std::unordered_map> ongoingOutputEvents; std::optional ongoingMainLoopEvent; @@ -39,6 +50,9 @@ class NodeEventAggregation { private: inline bool updateIntervalBuffers(PipelineEvent& event) { using namespace std::chrono; + std::unique_ptr> emptyIntBuffer; + std::unique_ptr> emptyTimeBuffer; + auto& ongoingEvent = [&]() -> std::optional& { switch(event.type) { case PipelineEvent::Type::LOOP: @@ -72,7 +86,29 @@ class NodeEventAggregation { } return otherTimingsBuffers[event.source]; } - return mainLoopTimingsBuffer; // To silence compiler warning + return emptyIntBuffer; // To silence compiler warning + }(); + auto& fpsBuffer = [&]() -> std::unique_ptr>& { + switch(event.type) { + case PipelineEvent::Type::LOOP: + throw std::runtime_error("LOOP event should not be an interval"); + case PipelineEvent::Type::INPUT: + if(inputFpsBuffers.find(event.source) == inputFpsBuffers.end()) { + inputFpsBuffers[event.source] = std::make_unique>(windowSize); + } + return inputFpsBuffers[event.source]; + case PipelineEvent::Type::OUTPUT: + if(outputFpsBuffers.find(event.source) == outputFpsBuffers.end()) { + outputFpsBuffers[event.source] = std::make_unique>(windowSize); + } + return outputFpsBuffers[event.source]; + case PipelineEvent::Type::CUSTOM: + if(otherFpsBuffers.find(event.source) == otherFpsBuffers.end()) { + otherFpsBuffers[event.source] = std::make_unique>(windowSize); + } + return otherFpsBuffers[event.source]; + } + return emptyTimeBuffer; // To silence compiler warning }(); if(ongoingEvent.has_value() && ongoingEvent->sequenceNum == event.sequenceNum && event.interval == PipelineEvent::Interval::END) { // End event @@ -87,6 +123,7 @@ class NodeEventAggregation { } timingsBufferByType[event.type]->add(durationEvent.durationUs); timingsBuffer->add(durationEvent.durationUs); + fpsBuffer->add({durationEvent.startEvent.getTimestamp(), durationEvent.startEvent.getSequenceNum()}); ongoingEvent = std::nullopt; @@ -109,6 +146,9 @@ class NodeEventAggregation { inline bool updatePingBuffers(PipelineEvent& event) { using namespace std::chrono; + std::unique_ptr> emptyIntBuffer; + std::unique_ptr> emptyTimeBuffer; + auto& ongoingEvent = [&]() -> std::optional& { switch(event.type) { case PipelineEvent::Type::LOOP: @@ -134,7 +174,22 @@ class NodeEventAggregation { case PipelineEvent::Type::OUTPUT: throw std::runtime_error("INPUT and OUTPUT events should not be pings"); } - return mainLoopTimingsBuffer; // To silence compiler warning + return 
emptyIntBuffer; // To silence compiler warning + }(); + auto& fpsBuffer = [&]() -> std::unique_ptr>& { + switch(event.type) { + case PipelineEvent::Type::INPUT: + case PipelineEvent::Type::OUTPUT: + throw std::runtime_error("INPUT and OUTPUT events should not be pings"); + case PipelineEvent::Type::LOOP: + break; + case PipelineEvent::Type::CUSTOM: + if(otherFpsBuffers.find(event.source) == otherFpsBuffers.end()) { + otherFpsBuffers[event.source] = std::make_unique>(windowSize); + } + return otherFpsBuffers[event.source]; + } + return emptyTimeBuffer; // To silence compiler warning }(); if(ongoingEvent.has_value() && ongoingEvent->sequenceNum == event.sequenceNum - 1) { // End event @@ -152,6 +207,7 @@ class NodeEventAggregation { } timingsBufferByType[event.type]->add(durationEvent.durationUs); timingsBuffer->add(durationEvent.durationUs); + if(fpsBuffer) fpsBuffer->add({durationEvent.startEvent.getTimestamp(), durationEvent.startEvent.getSequenceNum()}); // Start event ongoingEvent = event; @@ -199,6 +255,14 @@ class NodeEventAggregation { stats.medianMicrosRecent = (stats.medianMicrosRecent + bufferByType[bufferByType.size() / 2 - 1]) / 2; } } + inline void updateFpsStats(NodeState::TimingStats& stats, const utility::CircularBuffer& buffer) { + if(buffer.size() < 2) return; + auto timeDiff = std::chrono::duration_cast(buffer.last().time - buffer.first().time).count(); + auto frameDiff = buffer.last().sequenceNum - buffer.first().sequenceNum; + if(timeDiff > 0 && buffer.last().sequenceNum > buffer.first().sequenceNum) { + stats.fps = frameDiff * (1e6f / (float)timeDiff); + } + } public: void add(PipelineEvent& event) { @@ -224,15 +288,19 @@ class NodeEventAggregation { switch(event.type) { case PipelineEvent::Type::CUSTOM: updateTimingStats(state.otherStats[event.source], *otherTimingsBuffers[event.source]); + updateFpsStats(state.otherStats[event.source], *otherFpsBuffers[event.source]); break; case PipelineEvent::Type::LOOP: updateTimingStats(state.mainLoopStats, *mainLoopTimingsBuffer); + state.mainLoopStats.fps = 1e6f / (float)state.mainLoopStats.averageMicrosRecent; break; case PipelineEvent::Type::INPUT: updateTimingStats(state.inputStates[event.source].timingStats, *inputTimingsBuffers[event.source]); + updateFpsStats(state.inputStates[event.source].timingStats, *inputFpsBuffers[event.source]); break; case PipelineEvent::Type::OUTPUT: updateTimingStats(state.outputStats[event.source], *outputTimingsBuffers[event.source]); + updateFpsStats(state.outputStats[event.source], *outputFpsBuffers[event.source]); break; } } @@ -267,7 +335,7 @@ void PipelineEventAggregation::run() { auto& logger = pimpl->logger; std::unordered_map nodeStates; uint32_t sequenceNum = 0; - while(isRunning()) { + while(mainLoop()) { std::unordered_map> events; for(auto& [k, v] : inputs) { events[k.second] = v.tryGet(); diff --git a/src/pipeline/node/internal/PipelineStateMerge.cpp b/src/pipeline/node/internal/PipelineStateMerge.cpp index 177ec3325..157c6c4c1 100644 --- a/src/pipeline/node/internal/PipelineStateMerge.cpp +++ b/src/pipeline/node/internal/PipelineStateMerge.cpp @@ -1,7 +1,7 @@ #include "depthai/pipeline/node/internal/PipelineStateMerge.hpp" +#include "depthai/pipeline/datatype/PipelineEventAggregationConfig.hpp" #include "depthai/pipeline/datatype/PipelineState.hpp" - #include "pipeline/ThreadedNodeImpl.hpp" namespace dai { @@ -17,7 +17,8 @@ void mergeStates(std::shared_ptr& outState, const std::shared_ptr for(const auto& [key, value] : inState->nodeStates) { if(outState->nodeStates.find(key) != 
outState->nodeStates.end()) { throw std::runtime_error("PipelineStateMerge: Duplicate node state for nodeId " + std::to_string(key)); - } else outState->nodeStates[key] = value; + } else + outState->nodeStates[key] = value; } } void PipelineStateMerge::run() { @@ -25,11 +26,27 @@ void PipelineStateMerge::run() { if(!hasDeviceNodes && !hasHostNodes) { logger->warn("PipelineStateMerge: both device and host nodes are disabled. Have you built the node?"); } + std::optional currentConfig; uint32_t sequenceNum = 0; - while(isRunning()) { + while(mainLoop()) { auto outState = std::make_shared(); + bool waitForMatch = false; + if(!currentConfig.has_value() || (currentConfig.has_value() && !currentConfig->repeat) || request.has()) { + auto req = request.get(); + if(req != nullptr) { + currentConfig = *req; + currentConfig->setSequenceNum(++sequenceNum); + waitForMatch = true; + } + } if(hasDeviceNodes) { auto deviceState = inputDevice.get(); + if(waitForMatch && deviceState != nullptr && currentConfig.has_value()) { + while(isRunning() && deviceState->sequenceNum != currentConfig->sequenceNum) { + deviceState = inputDevice.get(); + if(!isRunning()) break; + } + } if(deviceState != nullptr) { *outState = *deviceState; } diff --git a/src/pipeline/node/internal/XLinkOutHost.cpp b/src/pipeline/node/internal/XLinkOutHost.cpp index c7890fbce..de5d782d4 100644 --- a/src/pipeline/node/internal/XLinkOutHost.cpp +++ b/src/pipeline/node/internal/XLinkOutHost.cpp @@ -53,7 +53,7 @@ void XLinkOutHost::run() { stream = XLinkStream(this->conn, this->streamName, maxSize); currentMaxSize = maxSize; }; - while(isRunning()) { + while(mainLoop()) { try { auto outgoing = in.get(); auto metadata = StreamMessageParser::serializeMetadata(outgoing); diff --git a/src/pipeline/node/test/MyProducer.cpp b/src/pipeline/node/test/MyProducer.cpp index 260d6807f..611cc14ad 100644 --- a/src/pipeline/node/test/MyProducer.cpp +++ b/src/pipeline/node/test/MyProducer.cpp @@ -12,7 +12,7 @@ namespace test { void MyProducer::run() { std::cout << "Hello, I just woke up and I'm ready to do some work!\n"; - while(isRunning()) { + while(mainLoop()) { std::this_thread::sleep_for(std::chrono::milliseconds(1500)); auto buf = std::make_shared(); diff --git a/src/rtabmap/RTABMapSLAM.cpp b/src/rtabmap/RTABMapSLAM.cpp index 65243135b..d7a5ed0d8 100644 --- a/src/rtabmap/RTABMapSLAM.cpp +++ b/src/rtabmap/RTABMapSLAM.cpp @@ -115,7 +115,7 @@ void RTABMapSLAM::odomPoseCB(std::shared_ptr data) { void RTABMapSLAM::run() { auto& logger = pimpl->logger; - while(isRunning()) { + while(mainLoop()) { if(!initialized) { continue; } else { diff --git a/src/rtabmap/RTABMapVIO.cpp b/src/rtabmap/RTABMapVIO.cpp index da82d0924..55f1ddae5 100644 --- a/src/rtabmap/RTABMapVIO.cpp +++ b/src/rtabmap/RTABMapVIO.cpp @@ -129,7 +129,7 @@ void RTABMapVIO::syncCB(std::shared_ptr data) { } void RTABMapVIO::run() { - while(isRunning()) { + while(mainLoop()) { auto msg = inSync.get(); if(msg != nullptr) { syncCB(msg); diff --git a/src/utility/ProtoSerialize.cpp b/src/utility/ProtoSerialize.cpp index d94870502..5894fcbb7 100644 --- a/src/utility/ProtoSerialize.cpp +++ b/src/utility/ProtoSerialize.cpp @@ -172,6 +172,7 @@ bool deserializationSupported(DatatypeEnum datatype) { case DatatypeEnum::CoverageData: case DatatypeEnum::PipelineEvent: case DatatypeEnum::PipelineState: + case DatatypeEnum::PipelineEventAggregationConfig: return false; } return false; diff --git a/tests/src/ondevice_tests/input_output_naming_test.cpp b/tests/src/ondevice_tests/input_output_naming_test.cpp index 
b246788b2..d845ced6e 100644 --- a/tests/src/ondevice_tests/input_output_naming_test.cpp +++ b/tests/src/ondevice_tests/input_output_naming_test.cpp @@ -8,7 +8,7 @@ class TestNode : public dai::node::CustomThreadedNode { public: void run() override { - while(isRunning()) { + while(mainLoop()) { // do nothing } } From c34cc45b15f9bad1df7b92130e9aa02ff547987e Mon Sep 17 00:00:00 2001 From: asahtik Date: Mon, 6 Oct 2025 12:31:38 +0200 Subject: [PATCH 18/40] Implement service like api for pipeline state --- .../internal/PipelineEventAggregation.cpp | 21 +++++++++++++++---- .../node/internal/PipelineStateMerge.cpp | 11 ++++++++-- 2 files changed, 26 insertions(+), 6 deletions(-) diff --git a/src/pipeline/node/internal/PipelineEventAggregation.cpp b/src/pipeline/node/internal/PipelineEventAggregation.cpp index 670c24083..cc778cb2e 100644 --- a/src/pipeline/node/internal/PipelineEventAggregation.cpp +++ b/src/pipeline/node/internal/PipelineEventAggregation.cpp @@ -3,6 +3,7 @@ #include #include "depthai/pipeline/datatype/PipelineEvent.hpp" +#include "depthai/pipeline/datatype/PipelineEventAggregationConfig.hpp" #include "depthai/pipeline/datatype/PipelineState.hpp" #include "depthai/utility/CircularBuffer.hpp" #include "pipeline/ThreadedNodeImpl.hpp" @@ -334,6 +335,7 @@ bool PipelineEventAggregation::runOnHost() const { void PipelineEventAggregation::run() { auto& logger = pimpl->logger; std::unordered_map nodeStates; + std::optional currentConfig; uint32_t sequenceNum = 0; while(mainLoop()) { std::unordered_map> events; @@ -357,10 +359,21 @@ void PipelineEventAggregation::run() { shouldSend = true; } } - outState->sequenceNum = sequenceNum++; - outState->setTimestamp(std::chrono::steady_clock::now()); - outState->tsDevice = outState->ts; - if(shouldSend) out.send(outState); + bool gotConfig = false; + if(!currentConfig.has_value() || (currentConfig.has_value() && !currentConfig->repeat) || request.has()) { + auto req = request.get(); + if(req != nullptr) { + currentConfig = *req; + gotConfig = true; + } + } + if(gotConfig || (currentConfig.has_value() && currentConfig->repeat)) { + outState->sequenceNum = sequenceNum++; + outState->configSequenceNum = currentConfig.has_value() ? 
currentConfig->sequenceNum : 0; + outState->setTimestamp(std::chrono::steady_clock::now()); + outState->tsDevice = outState->ts; + if(gotConfig || shouldSend) out.send(outState); + } } } diff --git a/src/pipeline/node/internal/PipelineStateMerge.cpp b/src/pipeline/node/internal/PipelineStateMerge.cpp index 157c6c4c1..d532ec161 100644 --- a/src/pipeline/node/internal/PipelineStateMerge.cpp +++ b/src/pipeline/node/internal/PipelineStateMerge.cpp @@ -28,6 +28,7 @@ void PipelineStateMerge::run() { } std::optional currentConfig; uint32_t sequenceNum = 0; + uint32_t configSequenceNum = 0; while(mainLoop()) { auto outState = std::make_shared(); bool waitForMatch = false; @@ -35,14 +36,14 @@ void PipelineStateMerge::run() { auto req = request.get(); if(req != nullptr) { currentConfig = *req; - currentConfig->setSequenceNum(++sequenceNum); + currentConfig->setSequenceNum(++configSequenceNum); waitForMatch = true; } } if(hasDeviceNodes) { auto deviceState = inputDevice.get(); if(waitForMatch && deviceState != nullptr && currentConfig.has_value()) { - while(isRunning() && deviceState->sequenceNum != currentConfig->sequenceNum) { + while(isRunning() && deviceState->configSequenceNum != currentConfig->sequenceNum) { deviceState = inputDevice.get(); if(!isRunning()) break; } @@ -53,6 +54,12 @@ void PipelineStateMerge::run() { } if(hasHostNodes) { auto hostState = inputHost.get(); + if(waitForMatch && hostState != nullptr && currentConfig.has_value()) { + while(isRunning() && hostState->configSequenceNum != currentConfig->sequenceNum) { + hostState = inputHost.get(); + if(!isRunning()) break; + } + } if(hostState != nullptr) { if(hasDeviceNodes) { // merge From fcbfe787f713d5df9d64bb06bf01026602a0d426 Mon Sep 17 00:00:00 2001 From: asahtik Date: Mon, 6 Oct 2025 15:44:19 +0200 Subject: [PATCH 19/40] Remove static assert --- include/depthai/pipeline/Pipeline.hpp | 1 - 1 file changed, 1 deletion(-) diff --git a/include/depthai/pipeline/Pipeline.hpp b/include/depthai/pipeline/Pipeline.hpp index 2f686ec36..bb6dae7c3 100644 --- a/include/depthai/pipeline/Pipeline.hpp +++ b/include/depthai/pipeline/Pipeline.hpp @@ -260,7 +260,6 @@ class PipelineImpl : public std::enable_shared_from_this { */ template class NodeStateApi { - static_assert(false); }; template <> class NodeStateApi> { From 28e5b93386dbbfbc02a6a2abaf73415eb3f0fa4b Mon Sep 17 00:00:00 2001 From: asahtik Date: Tue, 7 Oct 2025 08:50:26 +0200 Subject: [PATCH 20/40] Bugfixes --- examples/cpp/HostNodes/image_manip_host.cpp | 7 +- include/depthai/pipeline/Pipeline.hpp | 458 +++++++++--------- src/pipeline/Pipeline.cpp | 8 +- .../internal/PipelineEventAggregation.cpp | 1 + .../node/internal/PipelineStateMerge.cpp | 1 + 5 files changed, 254 insertions(+), 221 deletions(-) diff --git a/examples/cpp/HostNodes/image_manip_host.cpp b/examples/cpp/HostNodes/image_manip_host.cpp index c43d75c37..6bf7edafa 100644 --- a/examples/cpp/HostNodes/image_manip_host.cpp +++ b/examples/cpp/HostNodes/image_manip_host.cpp @@ -40,5 +40,10 @@ int main(int argc, char** argv) { pipeline.start(); - pipeline.wait(); + while(pipeline.isRunning()) { + std::this_thread::sleep_for(std::chrono::milliseconds(5000)); + std::cout << "Pipeline state: " << pipeline.getPipelineState().nodes().detailed().str() << std::endl; + } + + pipeline.stop(); } diff --git a/include/depthai/pipeline/Pipeline.hpp b/include/depthai/pipeline/Pipeline.hpp index bb6dae7c3..1739a7907 100644 --- a/include/depthai/pipeline/Pipeline.hpp +++ b/include/depthai/pipeline/Pipeline.hpp @@ -20,6 +20,7 @@ // shared 
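/*
 * How a state query travels once the request path from this series is in place
 * (a restatement of the code in these commits; the queue names match the
 * PipelineImpl members declared further down):
 *
 *   1. PipelineStateApi builds a PipelineEventAggregationConfig and sends it on
 *      pipelineStateRequest, an InputQueue feeding PipelineStateMerge::request.
 *   2. PipelineStateMerge stamps the request with a config sequence number,
 *      forwards it on outRequest to the device/host PipelineEventAggregation
 *      nodes, then reads inputDevice/inputHost until a PipelineState arrives
 *      whose configSequenceNum matches.
 *   3. The merged PipelineState is published on out and read back through
 *      pipelineStateOut by the NodeStateApi accessors.
 *
 * Illustrative one-shot request for all inputs of a single node (the node id is
 * a placeholder; an empty vector appears to mean "report everything"):
 *
 *     dai::PipelineEventAggregationConfig cfg;
 *     cfg.repeat = false;                        // one-shot, no streaming
 *     dai::NodeEventAggregationConfig n;
 *     n.nodeId = 3;
 *     n.inputs = std::vector<std::string>{};     // empty list -> all inputs
 *     cfg.nodes.push_back(n);
 */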
#include "depthai/device/BoardConfig.hpp" +#include "depthai/pipeline/InputQueue.hpp" #include "depthai/pipeline/PipelineSchema.hpp" #include "depthai/pipeline/datatype/PipelineState.hpp" #include "depthai/properties/GlobalProperties.hpp" @@ -29,6 +30,244 @@ namespace dai { namespace fs = std::filesystem; +/** + * pipeline.getState().nodes({nodeId1}).summary() -> std::unordered_map; + * pipeline.getState().nodes({nodeId1}).detailed() -> std::unordered_map; + * pipeline.getState().nodes(nodeId1).detailed() -> NodeState; + * pipeline.getState().nodes({nodeId1}).outputs() -> std::unordered_map; + * pipeline.getState().nodes({nodeId1}).outputs({outputName1}) -> std::unordered_map; + * pipeline.getState().nodes({nodeId1}).outputs(outputName) -> TimingStats; + * pipeline.getState().nodes({nodeId1}).events(); + * pipeline.getState().nodes({nodeId1}).inputs() -> std::unordered_map; + * pipeline.getState().nodes({nodeId1}).inputs({inputName1}) -> std::unordered_map; + * pipeline.getState().nodes({nodeId1}).inputs(inputName) -> QueueState; + * pipeline.getState().nodes({nodeId1}).otherStats() -> std::unordered_map; + * pipeline.getState().nodes({nodeId1}).otherStats({statName1}) -> std::unordered_map; + * pipeline.getState().nodes({nodeId1}).outputs(statName) -> TimingStats; + */ +template +class NodeStateApi {}; +template <> +class NodeStateApi> { + std::vector nodeIds; + + std::shared_ptr pipelineStateOut; + std::shared_ptr pipelineStateRequest; + + public: + explicit NodeStateApi(std::vector nodeIds, std::shared_ptr pipelineStateOut, std::shared_ptr pipelineStateRequest) + : nodeIds(std::move(nodeIds)), pipelineStateOut(pipelineStateOut), pipelineStateRequest(pipelineStateRequest) {} + std::unordered_map> summary() { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + for(auto id : nodeIds) { + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = id; + nodeCfg.summary = true; + cfg.nodes.push_back(nodeCfg); + } + + pipelineStateRequest->send(std::make_shared(cfg)); + auto state = pipelineStateOut->get(); + if(!state) throw std::runtime_error("Failed to get PipelineState"); + return {}; // TODO + } + PipelineState detailed() { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + for(auto id : nodeIds) { + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = id; + nodeCfg.summary = true; // contains main loop timing + nodeCfg.inputs = {}; // send all + nodeCfg.outputs = {}; // send all + nodeCfg.others = {}; // send all + cfg.nodes.push_back(nodeCfg); + } + + pipelineStateRequest->send(std::make_shared(cfg)); + auto state = pipelineStateOut->get(); + if(!state) throw std::runtime_error("Failed to get PipelineState"); + return *state; + } + std::unordered_map> outputs() { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + for(auto id : nodeIds) { + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = id; + nodeCfg.outputs = {}; // send all + cfg.nodes.push_back(nodeCfg); + } + + // TODO send and get + return {}; + } + std::unordered_map> inputs() { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + for(auto id : nodeIds) { + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = id; + nodeCfg.inputs = {}; // send all + cfg.nodes.push_back(nodeCfg); + } + + // TODO send and get + return {}; + } + std::unordered_map> otherStats() { + 
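/*
 * Illustrative usage of the fluent state API documented above (not part of the
 * patch; the node ids and port names are placeholders, and the pipeline-level
 * entry point is spelled getPipelineState() in the accompanying example):
 *
 *     // detailed state (loop timing, per-queue stats, custom stats) for two nodes
 *     dai::PipelineState st = pipeline.getPipelineState().nodes({camId, nnId}).detailed();
 *
 *     // timing statistics of a single named output of one node
 *     auto outStats = pipeline.getPipelineState().nodes(camId).outputs("out");
 *
 *     // queue state of a single named input of one node
 *     auto inQueue = pipeline.getPipelineState().nodes(camId).inputs("in");
 */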
PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + for(auto id : nodeIds) { + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = id; + nodeCfg.others = {}; // send all + cfg.nodes.push_back(nodeCfg); + } + + // TODO send and get + return {}; + } +}; +template <> +class NodeStateApi { + Node::Id nodeId; + + std::shared_ptr pipelineStateOut; + std::shared_ptr pipelineStateRequest; + + public: + explicit NodeStateApi(Node::Id nodeId, std::shared_ptr pipelineStateOut, std::shared_ptr pipelineStateRequest) + : nodeId(nodeId), pipelineStateOut(pipelineStateOut), pipelineStateRequest(pipelineStateRequest) {} + std::unordered_map summary() { + return NodeStateApi>({nodeId}, pipelineStateOut, pipelineStateRequest).summary()[nodeId]; + } + NodeState detailed() { + return NodeStateApi>({nodeId}, pipelineStateOut, pipelineStateRequest).detailed().nodeStates[nodeId]; + } + std::unordered_map outputs() { + return NodeStateApi>({nodeId}, pipelineStateOut, pipelineStateRequest).outputs()[nodeId]; + } + std::unordered_map inputs() { + return NodeStateApi>({nodeId}, pipelineStateOut, pipelineStateRequest).inputs()[nodeId]; + } + std::unordered_map otherStats() { + return NodeStateApi>({nodeId}, pipelineStateOut, pipelineStateRequest).otherStats()[nodeId]; + } + std::unordered_map outputs(const std::vector& outputNames) { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = nodeId; + nodeCfg.outputs = outputNames; + cfg.nodes.push_back(nodeCfg); + + // TODO send and get + return {}; + } + NodeState::TimingStats outputs(const std::string& outputName) { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = nodeId; + nodeCfg.outputs = {outputName}; + cfg.nodes.push_back(nodeCfg); + + // TODO send and get + return {}; + } + std::unordered_map> events() { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = nodeId; + nodeCfg.events = true; + cfg.nodes.push_back(nodeCfg); + + // TODO send and get + return {}; + } + std::unordered_map inputs(const std::vector& inputNames) { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = nodeId; + nodeCfg.inputs = inputNames; + cfg.nodes.push_back(nodeCfg); + + // TODO send and get + return {}; + } + NodeState::QueueState inputs(const std::string& inputName) { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = nodeId; + nodeCfg.inputs = {inputName}; + cfg.nodes.push_back(nodeCfg); + + // TODO send and get + return {}; + } + std::unordered_map otherStats(const std::vector& statNames) { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = nodeId; + nodeCfg.others = statNames; + cfg.nodes.push_back(nodeCfg); + + // TODO send and get + return {}; + } + NodeState::TimingStats otherStats(const std::string& statName) { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + 
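/*
 * The remaining "TODO send and get" bodies in these accessors presumably follow
 * the same round trip already written out in summary()/detailed() above — this
 * is an assumption about how the stubs get filled in, not part of the patch:
 *
 *     pipelineStateRequest->send(std::make_shared<dai::PipelineEventAggregationConfig>(cfg));
 *     auto state = pipelineStateOut->get<dai::PipelineState>();
 *     if(state == nullptr) throw std::runtime_error("Failed to get PipelineState");
 *     // ...then project state->nodeStates into this accessor's return type
 */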
cfg.setTimestamp(std::chrono::steady_clock::now()); + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = nodeId; + nodeCfg.others = {statName}; + cfg.nodes.push_back(nodeCfg); + + // TODO send and get + return {}; + } +}; +class PipelineStateApi { + std::shared_ptr pipelineStateOut; + std::shared_ptr pipelineStateRequest; + std::vector nodeIds; // empty means all nodes + + public: + PipelineStateApi(std::shared_ptr pipelineStateOut, std::shared_ptr pipelineStateRequest, const std::vector>& allNodes) + : pipelineStateOut(std::move(pipelineStateOut)), pipelineStateRequest(std::move(pipelineStateRequest)) { + for(const auto& n : allNodes) { + nodeIds.push_back(n->id); + } + } + NodeStateApi> nodes() { + return NodeStateApi>(nodeIds, pipelineStateOut, pipelineStateRequest); + } + NodeStateApi> nodes(const std::vector& nodeIds) { + return NodeStateApi>(nodeIds, pipelineStateOut, pipelineStateRequest); + } + NodeStateApi nodes(Node::Id nodeId) { + return NodeStateApi(nodeId, pipelineStateOut, pipelineStateRequest); + } +}; + class PipelineImpl : public std::enable_shared_from_this { friend class Pipeline; friend class Node; @@ -88,7 +327,7 @@ class PipelineImpl : public std::enable_shared_from_this { bool isDeviceOnly() const; // Pipeline state getters - std::shared_ptr getPipelineState(); + PipelineStateApi getPipelineState(); // Must be incremented and unique for each node Node::Id latestId = 0; @@ -123,6 +362,7 @@ class PipelineImpl : public std::enable_shared_from_this { // Pipeline events std::shared_ptr pipelineStateOut; + std::shared_ptr pipelineStateRequest; // Output queues std::vector> outputQueues; @@ -243,220 +483,6 @@ class PipelineImpl : public std::enable_shared_from_this { std::vector loadResourceCwd(fs::path uri, fs::path cwd, bool moveAsset = false); }; -/** - * pipeline.getState().nodes({nodeId1}).summary() -> std::unordered_map; - * pipeline.getState().nodes({nodeId1}).detailed() -> std::unordered_map; - * pipeline.getState().nodes(nodeId1).detailed() -> NodeState; - * pipeline.getState().nodes({nodeId1}).outputs() -> std::unordered_map; - * pipeline.getState().nodes({nodeId1}).outputs({outputName1}) -> std::unordered_map; - * pipeline.getState().nodes({nodeId1}).outputs(outputName) -> TimingStats; - * pipeline.getState().nodes({nodeId1}).events(); - * pipeline.getState().nodes({nodeId1}).inputs() -> std::unordered_map; - * pipeline.getState().nodes({nodeId1}).inputs({inputName1}) -> std::unordered_map; - * pipeline.getState().nodes({nodeId1}).inputs(inputName) -> QueueState; - * pipeline.getState().nodes({nodeId1}).otherStats() -> std::unordered_map; - * pipeline.getState().nodes({nodeId1}).otherStats({statName1}) -> std::unordered_map; - * pipeline.getState().nodes({nodeId1}).outputs(statName) -> TimingStats; - */ -template -class NodeStateApi { -}; -template <> -class NodeStateApi> { - std::vector nodeIds; - - public: - explicit NodeStateApi(std::vector nodeIds) : nodeIds(std::move(nodeIds)) {} - std::unordered_map> summary() { - PipelineEventAggregationConfig cfg; - cfg.repeat = false; - cfg.setTimestamp(std::chrono::steady_clock::now()); - for(auto id : nodeIds) { - NodeEventAggregationConfig nodeCfg; - nodeCfg.nodeId = id; - nodeCfg.summary = true; - cfg.nodes.push_back(nodeCfg); - } - - // TODO send and get - return {}; - } - PipelineState detailed() { - PipelineEventAggregationConfig cfg; - cfg.repeat = false; - cfg.setTimestamp(std::chrono::steady_clock::now()); - for(auto id : nodeIds) { - NodeEventAggregationConfig nodeCfg; - nodeCfg.nodeId = id; - 
nodeCfg.summary = true; // contains main loop timing - nodeCfg.inputs = {}; // send all - nodeCfg.outputs = {}; // send all - nodeCfg.others = {}; // send all - cfg.nodes.push_back(nodeCfg); - } - - // TODO send and get - return {}; - } - std::unordered_map> outputs() { - PipelineEventAggregationConfig cfg; - cfg.repeat = false; - cfg.setTimestamp(std::chrono::steady_clock::now()); - for(auto id : nodeIds) { - NodeEventAggregationConfig nodeCfg; - nodeCfg.nodeId = id; - nodeCfg.outputs = {}; // send all - cfg.nodes.push_back(nodeCfg); - } - - // TODO send and get - return {}; - } - std::unordered_map> inputs() { - PipelineEventAggregationConfig cfg; - cfg.repeat = false; - cfg.setTimestamp(std::chrono::steady_clock::now()); - for(auto id : nodeIds) { - NodeEventAggregationConfig nodeCfg; - nodeCfg.nodeId = id; - nodeCfg.inputs = {}; // send all - cfg.nodes.push_back(nodeCfg); - } - - // TODO send and get - return {}; - } - std::unordered_map> otherStats() { - PipelineEventAggregationConfig cfg; - cfg.repeat = false; - cfg.setTimestamp(std::chrono::steady_clock::now()); - for(auto id : nodeIds) { - NodeEventAggregationConfig nodeCfg; - nodeCfg.nodeId = id; - nodeCfg.others = {}; // send all - cfg.nodes.push_back(nodeCfg); - } - - // TODO send and get - return {}; - } -}; -template <> -class NodeStateApi { - Node::Id nodeId; - - public: - explicit NodeStateApi(Node::Id nodeId) : nodeId(nodeId) {} - std::unordered_map summary() { - return NodeStateApi>({nodeId}).summary()[nodeId]; - } - NodeState detailed() { - return NodeStateApi>({nodeId}).detailed().nodeStates[nodeId]; - } - std::unordered_map outputs() { - return NodeStateApi>({nodeId}).outputs()[nodeId]; - } - std::unordered_map inputs() { - return NodeStateApi>({nodeId}).inputs()[nodeId]; - } - std::unordered_map otherStats() { - return NodeStateApi>({nodeId}).otherStats()[nodeId]; - } - std::unordered_map outputs(const std::vector& outputNames) { - PipelineEventAggregationConfig cfg; - cfg.repeat = false; - cfg.setTimestamp(std::chrono::steady_clock::now()); - NodeEventAggregationConfig nodeCfg; - nodeCfg.nodeId = nodeId; - nodeCfg.outputs = outputNames; - cfg.nodes.push_back(nodeCfg); - - // TODO send and get - return {}; - } - NodeState::TimingStats outputs(const std::string& outputName) { - PipelineEventAggregationConfig cfg; - cfg.repeat = false; - cfg.setTimestamp(std::chrono::steady_clock::now()); - NodeEventAggregationConfig nodeCfg; - nodeCfg.nodeId = nodeId; - nodeCfg.outputs = {outputName}; - cfg.nodes.push_back(nodeCfg); - - // TODO send and get - return {}; - } - std::unordered_map> events() { - PipelineEventAggregationConfig cfg; - cfg.repeat = false; - cfg.setTimestamp(std::chrono::steady_clock::now()); - NodeEventAggregationConfig nodeCfg; - nodeCfg.nodeId = nodeId; - nodeCfg.events = true; - cfg.nodes.push_back(nodeCfg); - - // TODO send and get - return {}; - } - std::unordered_map inputs(const std::vector& inputNames) { - PipelineEventAggregationConfig cfg; - cfg.repeat = false; - cfg.setTimestamp(std::chrono::steady_clock::now()); - NodeEventAggregationConfig nodeCfg; - nodeCfg.nodeId = nodeId; - nodeCfg.inputs = inputNames; - cfg.nodes.push_back(nodeCfg); - - // TODO send and get - return {}; - } - NodeState::QueueState inputs(const std::string& inputName) { - PipelineEventAggregationConfig cfg; - cfg.repeat = false; - cfg.setTimestamp(std::chrono::steady_clock::now()); - NodeEventAggregationConfig nodeCfg; - nodeCfg.nodeId = nodeId; - nodeCfg.inputs = {inputName}; - cfg.nodes.push_back(nodeCfg); - - // TODO send 
and get - return {}; - } - std::unordered_map otherStats(const std::vector& statNames) { - PipelineEventAggregationConfig cfg; - cfg.repeat = false; - cfg.setTimestamp(std::chrono::steady_clock::now()); - NodeEventAggregationConfig nodeCfg; - nodeCfg.nodeId = nodeId; - nodeCfg.others = statNames; - cfg.nodes.push_back(nodeCfg); - - // TODO send and get - return {}; - } - NodeState::TimingStats otherStats(const std::string& statName) { - PipelineEventAggregationConfig cfg; - cfg.repeat = false; - cfg.setTimestamp(std::chrono::steady_clock::now()); - NodeEventAggregationConfig nodeCfg; - nodeCfg.nodeId = nodeId; - nodeCfg.others = {statName}; - cfg.nodes.push_back(nodeCfg); - - // TODO send and get - return {}; - } -}; -class PipelineStateApi { - public: - NodeStateApi> nodes(const std::vector& nodeIds) { - return NodeStateApi>(nodeIds); - } - NodeStateApi nodes(Node::Id nodeId) { - return NodeStateApi(nodeId); - } -}; - /** * @brief Represents the pipeline, set of nodes and connections between them */ @@ -735,7 +761,7 @@ class Pipeline { void enableHolisticReplay(const std::string& pathToRecording); // Pipeline state getters - std::shared_ptr getPipelineState(); + PipelineStateApi getPipelineState(); }; } // namespace dai diff --git a/src/pipeline/Pipeline.cpp b/src/pipeline/Pipeline.cpp index 1ee73c20a..737cea306 100644 --- a/src/pipeline/Pipeline.cpp +++ b/src/pipeline/Pipeline.cpp @@ -479,9 +479,8 @@ bool PipelineImpl::isDeviceOnly() const { return deviceOnly; } -std::shared_ptr PipelineImpl::getPipelineState() { - auto pipelineState = pipelineStateOut->get(); - return pipelineState; +PipelineStateApi PipelineImpl::getPipelineState() { + return PipelineStateApi(pipelineStateOut, pipelineStateRequest, getAllNodes()); } void PipelineImpl::add(std::shared_ptr node) { @@ -678,6 +677,7 @@ void PipelineImpl::build() { stateMerge->outRequest.link(hostEventAgg->request); } pipelineStateOut = stateMerge->out.createOutputQueue(1, false); + pipelineStateRequest = stateMerge->request.createInputQueue(); } isBuild = true; @@ -1094,7 +1094,7 @@ void Pipeline::enableHolisticReplay(const std::string& pathToRecording) { impl()->enableHolisticRecordReplay = true; } -std::shared_ptr Pipeline::getPipelineState() { +PipelineStateApi Pipeline::getPipelineState() { return impl()->getPipelineState(); } diff --git a/src/pipeline/node/internal/PipelineEventAggregation.cpp b/src/pipeline/node/internal/PipelineEventAggregation.cpp index cc778cb2e..308df35fd 100644 --- a/src/pipeline/node/internal/PipelineEventAggregation.cpp +++ b/src/pipeline/node/internal/PipelineEventAggregation.cpp @@ -372,6 +372,7 @@ void PipelineEventAggregation::run() { outState->configSequenceNum = currentConfig.has_value() ? 
currentConfig->sequenceNum : 0; outState->setTimestamp(std::chrono::steady_clock::now()); outState->tsDevice = outState->ts; + // TODO: send only requested data if(gotConfig || shouldSend) out.send(outState); } } diff --git a/src/pipeline/node/internal/PipelineStateMerge.cpp b/src/pipeline/node/internal/PipelineStateMerge.cpp index d532ec161..c3d4f7a38 100644 --- a/src/pipeline/node/internal/PipelineStateMerge.cpp +++ b/src/pipeline/node/internal/PipelineStateMerge.cpp @@ -37,6 +37,7 @@ void PipelineStateMerge::run() { if(req != nullptr) { currentConfig = *req; currentConfig->setSequenceNum(++configSequenceNum); + outRequest.send(std::make_shared(currentConfig.value())); waitForMatch = true; } } From 215119d5cca6715c5e4514ec58bf25f1ce0b6c8e Mon Sep 17 00:00:00 2001 From: asahtik Date: Tue, 7 Oct 2025 11:34:04 +0200 Subject: [PATCH 21/40] Collect events in a different thread --- include/depthai/pipeline/MessageQueue.hpp | 6 +- src/pipeline/node/host/Replay.cpp | 4 +- .../internal/PipelineEventAggregation.cpp | 98 ++++++++++++++----- src/utility/PipelineEventDispatcher.cpp | 3 + 4 files changed, 84 insertions(+), 27 deletions(-) diff --git a/include/depthai/pipeline/MessageQueue.hpp b/include/depthai/pipeline/MessageQueue.hpp index 7969aef1d..c4d72a7fb 100644 --- a/include/depthai/pipeline/MessageQueue.hpp +++ b/include/depthai/pipeline/MessageQueue.hpp @@ -201,16 +201,16 @@ class MessageQueue : public std::enable_shared_from_this { */ template std::shared_ptr tryGet() { - if(pipelineEventDispatcher) pipelineEventDispatcher->startEvent(name); + if(pipelineEventDispatcher) pipelineEventDispatcher->startEvent(name, getSize()); if(queue.isDestroyed()) { throw QueueException(CLOSED_QUEUE_MESSAGE); } std::shared_ptr val = nullptr; if(!queue.tryPop(val)) { - if(pipelineEventDispatcher) pipelineEventDispatcher->endEvent(name); + if(pipelineEventDispatcher) pipelineEventDispatcher->endEvent(name, getSize()); return nullptr; } - if(pipelineEventDispatcher) pipelineEventDispatcher->endEvent(name); + if(pipelineEventDispatcher) pipelineEventDispatcher->endEvent(name, getSize()); return std::dynamic_pointer_cast(val); } diff --git a/src/pipeline/node/host/Replay.cpp b/src/pipeline/node/host/Replay.cpp index d995179c6..f35f5dfb9 100644 --- a/src/pipeline/node/host/Replay.cpp +++ b/src/pipeline/node/host/Replay.cpp @@ -214,7 +214,7 @@ void ReplayVideo::run() { uint64_t index = 0; auto loopStart = std::chrono::steady_clock::now(); auto prevMsgTs = loopStart; - while(isRunning()) { + while(mainLoop()) { std::shared_ptr metadata; std::vector frame; if(hasMetadata) { @@ -341,7 +341,7 @@ void ReplayMetadataOnly::run() { bool first = true; auto loopStart = std::chrono::steady_clock::now(); auto prevMsgTs = loopStart; - while(isRunning()) { + while(mainLoop()) { std::shared_ptr metadata; std::vector frame; if(!utility::deserializationSupported(datatype)) { diff --git a/src/pipeline/node/internal/PipelineEventAggregation.cpp b/src/pipeline/node/internal/PipelineEventAggregation.cpp index 308df35fd..6859e939e 100644 --- a/src/pipeline/node/internal/PipelineEventAggregation.cpp +++ b/src/pipeline/node/internal/PipelineEventAggregation.cpp @@ -1,6 +1,7 @@ #include "depthai/pipeline/node/internal/PipelineEventAggregation.hpp" #include +#include #include "depthai/pipeline/datatype/PipelineEvent.hpp" #include "depthai/pipeline/datatype/PipelineEventAggregationConfig.hpp" @@ -274,6 +275,8 @@ class NodeEventAggregation { inputQueueSizesBuffers[event.source] = std::make_unique>(windowSize); } 
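/*
 * Queue-depth tracking introduced in this commit: MessageQueue::tryGet() (and
 * presumably the other getters) now passes getSize() to startEvent()/endEvent(),
 * so an INPUT END event is expected to carry the current queue depth — the
 * runtime_error that follows enforces that. The sizes recorded here presumably
 * feed the per-input QueueStats (maxQueued etc.) reported in NodeState.
 */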
inputQueueSizesBuffers[event.source]->add(*event.queueSize); + } else { + throw std::runtime_error(fmt::format("INPUT END event must have queue size set source: {}, node {}", event.source, event.nodeId)); } } bool addedEvent = false; @@ -321,6 +324,67 @@ class NodeEventAggregation { } }; +class PipelineEventHandler { + std::unordered_map nodeStates; + Node::InputMap* inputs; + uint32_t aggregationWindowSize; + uint32_t eventBatchSize; + + std::atomic running; + + std::thread thread; + + std::shared_ptr logger; + + std::shared_mutex mutex; + + public: + PipelineEventHandler( + Node::InputMap* inputs, uint32_t aggregationWindowSize, uint32_t eventBatchSize, std::shared_ptr logger) + : inputs(inputs), aggregationWindowSize(aggregationWindowSize), eventBatchSize(eventBatchSize), logger(logger) {} + void threadedRun() { + while(running) { + std::unordered_map> events; + bool gotEvents = false; + for(auto& [k, v] : *inputs) { + events[k.second] = v.tryGet(); + gotEvents = gotEvents || (events[k.second] != nullptr); + } + for(auto& [k, event] : events) { + if(event != nullptr) { + if(nodeStates.find(event->nodeId) == nodeStates.end()) { + nodeStates.insert_or_assign(event->nodeId, NodeEventAggregation(aggregationWindowSize, eventBatchSize, logger)); + } + { + std::unique_lock lock(mutex); + nodeStates.at(event->nodeId).add(*event); + } + } + } + if(!gotEvents) { + std::this_thread::sleep_for(std::chrono::milliseconds(10)); + } + } + } + void run() { + running = true; + thread = std::thread(&PipelineEventHandler::threadedRun, this); + } + void stop() { + running = false; + if(thread.joinable()) thread.join(); + } + bool getState(std::shared_ptr outState) { + std::shared_lock lock(mutex); + bool updated = false; + for(auto& [nodeId, nodeState] : nodeStates) { + outState->nodeStates[nodeId] = nodeState.state; + if(nodeState.count % eventBatchSize == 0) updated = true; + } + return updated; + } +}; + void PipelineEventAggregation::setRunOnHost(bool runOnHost) { runOnHostVar = runOnHost; } @@ -334,31 +398,14 @@ bool PipelineEventAggregation::runOnHost() const { void PipelineEventAggregation::run() { auto& logger = pimpl->logger; - std::unordered_map nodeStates; + + PipelineEventHandler handler(&inputs, properties.aggregationWindowSize, properties.eventBatchSize, logger); + handler.run(); + std::optional currentConfig; uint32_t sequenceNum = 0; while(mainLoop()) { - std::unordered_map> events; - for(auto& [k, v] : inputs) { - events[k.second] = v.tryGet(); - } - for(auto& [k, event] : events) { - if(event != nullptr) { - if(nodeStates.find(event->nodeId) == nodeStates.end()) { - nodeStates.insert_or_assign(event->nodeId, NodeEventAggregation(properties.aggregationWindowSize, properties.eventBatchSize, logger)); - } - nodeStates.at(event->nodeId).add(*event); - } - } auto outState = std::make_shared(); - bool shouldSend = false; - for(auto& [nodeId, nodeState] : nodeStates) { - if(nodeState.count % properties.eventBatchSize == 0) { - outState->nodeStates[nodeId] = nodeState.state; - if(!properties.sendEvents) outState->nodeStates[nodeId].events.clear(); - shouldSend = true; - } - } bool gotConfig = false; if(!currentConfig.has_value() || (currentConfig.has_value() && !currentConfig->repeat) || request.has()) { auto req = request.get(); @@ -368,14 +415,21 @@ void PipelineEventAggregation::run() { } } if(gotConfig || (currentConfig.has_value() && currentConfig->repeat)) { + bool updated = handler.getState(outState); + for(auto& [nodeId, nodeState] : outState->nodeStates) { + if(!properties.sendEvents) 
nodeState.events.clear(); + } + outState->sequenceNum = sequenceNum++; outState->configSequenceNum = currentConfig.has_value() ? currentConfig->sequenceNum : 0; outState->setTimestamp(std::chrono::steady_clock::now()); outState->tsDevice = outState->ts; // TODO: send only requested data - if(gotConfig || shouldSend) out.send(outState); + if(gotConfig || (currentConfig.has_value() && currentConfig->repeat && updated)) out.send(outState); } + std::this_thread::sleep_for(std::chrono::milliseconds(10)); } + handler.stop(); } } // namespace internal diff --git a/src/utility/PipelineEventDispatcher.cpp b/src/utility/PipelineEventDispatcher.cpp index 1fdb32831..33bff7a04 100644 --- a/src/utility/PipelineEventDispatcher.cpp +++ b/src/utility/PipelineEventDispatcher.cpp @@ -26,6 +26,7 @@ void PipelineEventDispatcher::addEvent(const std::string& source, PipelineEvent: } } void PipelineEventDispatcher::startEvent(const std::string& source, std::optional queueSize, std::optional metadata) { + // TODO add mutex checkNodeId(); if(events.find(source) == events.end()) { throw std::runtime_error("Event with name " + source + " does not exist"); @@ -49,6 +50,7 @@ void PipelineEventDispatcher::startEvent(const std::string& source, std::optiona } } void PipelineEventDispatcher::endEvent(const std::string& source, std::optional queueSize, std::optional metadata) { + // TODO add mutex checkNodeId(); auto now = std::chrono::steady_clock::now(); @@ -77,6 +79,7 @@ void PipelineEventDispatcher::endEvent(const std::string& source, std::optional< event.event.queueSize = std::nullopt; } void PipelineEventDispatcher::pingEvent(const std::string& source) { + // TODO add mutex checkNodeId(); auto now = std::chrono::steady_clock::now(); From e8045e473279e0ff1bd26f5c050c8e43b56205f8 Mon Sep 17 00:00:00 2001 From: asahtik Date: Tue, 7 Oct 2025 18:00:34 +0200 Subject: [PATCH 22/40] WIP: dispatcher + state improvements --- .../datatype/PipelineStateBindings.cpp | 3 +- include/depthai/pipeline/MessageQueue.hpp | 9 +- include/depthai/pipeline/ThreadedNode.hpp | 7 +- .../pipeline/datatype/PipelineEvent.hpp | 5 +- .../pipeline/datatype/PipelineState.hpp | 47 ++++++-- include/depthai/utility/ImageManipImpl.hpp | 74 +++++++------ .../utility/PipelineEventDispatcher.hpp | 27 +++-- .../PipelineEventDispatcherInterface.hpp | 38 +++++-- src/pipeline/MessageQueue.cpp | 6 +- src/pipeline/Node.cpp | 8 +- src/pipeline/ThreadedNode.cpp | 18 +++- .../internal/PipelineEventAggregation.cpp | 69 ++++++++---- src/utility/PipelineEventDispatcher.cpp | 100 ++++++++++++------ 13 files changed, 279 insertions(+), 132 deletions(-) diff --git a/bindings/python/src/pipeline/datatype/PipelineStateBindings.cpp b/bindings/python/src/pipeline/datatype/PipelineStateBindings.cpp index d6bc03086..9f40eaf72 100644 --- a/bindings/python/src/pipeline/datatype/PipelineStateBindings.cpp +++ b/bindings/python/src/pipeline/datatype/PipelineStateBindings.cpp @@ -69,9 +69,10 @@ void bind_pipelinestate(pybind11::module& m, void* pCallstack) { nodeState.def(py::init<>()) .def("__repr__", &NodeState::str) .def_readwrite("events", &NodeState::events, DOC(dai, NodeState, events)) - .def_readwrite("timingsByType", &NodeState::timingsByType, DOC(dai, NodeState, timingsByType)) .def_readwrite("inputStates", &NodeState::inputStates, DOC(dai, NodeState, inputStates)) .def_readwrite("outputStats", &NodeState::outputStats, DOC(dai, NodeState, outputStats)) + .def_readwrite("inputGroupStats", &NodeState::inputGroupStats, DOC(dai, NodeState, inputGroupStats)) + 
.def_readwrite("outputGroupStats", &NodeState::outputGroupStats, DOC(dai, NodeState, outputGroupStats)) .def_readwrite("mainLoopStats", &NodeState::mainLoopStats, DOC(dai, NodeState, mainLoopStats)) .def_readwrite("otherStats", &NodeState::otherStats, DOC(dai, NodeState, otherStats)); diff --git a/include/depthai/pipeline/MessageQueue.hpp b/include/depthai/pipeline/MessageQueue.hpp index c4d72a7fb..43e1d31e6 100644 --- a/include/depthai/pipeline/MessageQueue.hpp +++ b/include/depthai/pipeline/MessageQueue.hpp @@ -201,16 +201,15 @@ class MessageQueue : public std::enable_shared_from_this { */ template std::shared_ptr tryGet() { - if(pipelineEventDispatcher) pipelineEventDispatcher->startEvent(name, getSize()); + if(pipelineEventDispatcher) pipelineEventDispatcher->startInputEvent(name, getSize()); if(queue.isDestroyed()) { throw QueueException(CLOSED_QUEUE_MESSAGE); } std::shared_ptr val = nullptr; if(!queue.tryPop(val)) { - if(pipelineEventDispatcher) pipelineEventDispatcher->endEvent(name, getSize()); return nullptr; } - if(pipelineEventDispatcher) pipelineEventDispatcher->endEvent(name, getSize()); + if(pipelineEventDispatcher) pipelineEventDispatcher->endInputEvent(name, getSize()); return std::dynamic_pointer_cast(val); } @@ -230,12 +229,12 @@ class MessageQueue : public std::enable_shared_from_this { */ template std::shared_ptr get() { - if(pipelineEventDispatcher) pipelineEventDispatcher->startEvent(name, getSize()); + if(pipelineEventDispatcher) pipelineEventDispatcher->startInputEvent(name, getSize()); std::shared_ptr val = nullptr; if(!queue.waitAndPop(val)) { throw QueueException(CLOSED_QUEUE_MESSAGE); } - if(pipelineEventDispatcher) pipelineEventDispatcher->endEvent(name, getSize()); + if(pipelineEventDispatcher) pipelineEventDispatcher->endInputEvent(name, getSize()); return std::dynamic_pointer_cast(val); } diff --git a/include/depthai/pipeline/ThreadedNode.hpp b/include/depthai/pipeline/ThreadedNode.hpp index 84475b1fc..d5a2147b0 100644 --- a/include/depthai/pipeline/ThreadedNode.hpp +++ b/include/depthai/pipeline/ThreadedNode.hpp @@ -5,7 +5,6 @@ #include "depthai/utility/AtomicBool.hpp" #include "depthai/utility/JoiningThread.hpp" #include "depthai/utility/spimpl.h" -#include "depthai/utility/PipelineEventDispatcher.hpp" namespace dai { @@ -16,7 +15,7 @@ class ThreadedNode : public Node { JoiningThread thread; AtomicBool running{false}; - protected: + protected: Output pipelineEventOutput{*this, {"pipelineEventOutput", DEFAULT_GROUP, {{{DatatypeEnum::PipelineEvent, false}}}}}; void initPipelineEventDispatcher(int64_t nodeId); @@ -69,6 +68,10 @@ class ThreadedNode : public Node { */ virtual dai::LogLevel getLogLevel() const; + utility::PipelineEventDispatcherInterface::BlockPipelineEvent inputBlockEvent(const std::string& source = "defaultInputGroup"); + utility::PipelineEventDispatcherInterface::BlockPipelineEvent outputBlockEvent(const std::string& source = "defaultOutputGroup"); + utility::PipelineEventDispatcherInterface::BlockPipelineEvent blockEvent(PipelineEvent::Type type, const std::string& source); + class Impl; spimpl::impl_ptr pimpl; }; diff --git a/include/depthai/pipeline/datatype/PipelineEvent.hpp b/include/depthai/pipeline/datatype/PipelineEvent.hpp index 3158123c5..b0c17f215 100644 --- a/include/depthai/pipeline/datatype/PipelineEvent.hpp +++ b/include/depthai/pipeline/datatype/PipelineEvent.hpp @@ -17,6 +17,8 @@ class PipelineEvent : public Buffer { LOOP = 1, INPUT = 2, OUTPUT = 3, + INPUT_BLOCK = 4, + OUTPUT_BLOCK = 5, }; enum class Interval : std::int32_t 
{ NONE = 0, @@ -28,7 +30,6 @@ class PipelineEvent : public Buffer { virtual ~PipelineEvent() = default; int64_t nodeId = -1; - std::optional metadata; std::optional queueSize; Interval interval = Interval::NONE; Type type = Type::CUSTOM; @@ -39,7 +40,7 @@ class PipelineEvent : public Buffer { datatype = DatatypeEnum::PipelineEvent; }; - DEPTHAI_SERIALIZE(PipelineEvent, Buffer::ts, Buffer::tsDevice, Buffer::sequenceNum, nodeId, metadata, interval, type, source); + DEPTHAI_SERIALIZE(PipelineEvent, Buffer::ts, Buffer::tsDevice, Buffer::sequenceNum, nodeId, interval, type, source); }; } // namespace dai diff --git a/include/depthai/pipeline/datatype/PipelineState.hpp b/include/depthai/pipeline/datatype/PipelineState.hpp index d6665c292..e30833c92 100644 --- a/include/depthai/pipeline/datatype/PipelineState.hpp +++ b/include/depthai/pipeline/datatype/PipelineState.hpp @@ -24,8 +24,12 @@ class NodeState { uint64_t minMicrosRecent = -1; uint64_t maxMicrosRecent; uint64_t medianMicrosRecent; + DEPTHAI_SERIALIZE(TimingStats, minMicros, maxMicros, averageMicrosRecent, stdDevMicrosRecent, minMicrosRecent, maxMicrosRecent, medianMicrosRecent); + }; + struct Timing { float fps; - DEPTHAI_SERIALIZE(TimingStats, minMicros, maxMicros, averageMicrosRecent, stdDevMicrosRecent, minMicrosRecent, maxMicrosRecent, medianMicrosRecent, fps); + TimingStats durationStats; + DEPTHAI_SERIALIZE(Timing, fps, durationStats); }; struct QueueStats { uint32_t maxQueued; @@ -34,21 +38,42 @@ class NodeState { uint32_t medianQueuedRecent; DEPTHAI_SERIALIZE(QueueStats, maxQueued, minQueuedRecent, maxQueuedRecent, medianQueuedRecent); }; - struct QueueState { - bool waiting; + struct InputQueueState { + enum class State : std::int32_t { + IDLE = 0, + WAITING = 1, + BLOCKED = 2 + } state = State::IDLE; uint32_t numQueued; - TimingStats timingStats; + Timing timing; QueueStats queueStats; - DEPTHAI_SERIALIZE(QueueState, waiting, numQueued, timingStats); + DEPTHAI_SERIALIZE(InputQueueState, state, numQueued, timing); + }; + struct OutputQueueState { + enum class State : std::int32_t { + IDLE = 0, + BLOCKED = 1 + } state = State::IDLE; + Timing timing; + DEPTHAI_SERIALIZE(OutputQueueState, state, timing); }; + enum class State : std::int32_t { + IDLE = 0, + GETTING_INPUTS = 1, + PROCESSING = 2, + SENDING_OUTPUTS = 3 + }; + + State state = State::IDLE; std::vector events; - std::unordered_map timingsByType; - std::unordered_map outputStats; - std::unordered_map inputStates; - TimingStats mainLoopStats; - std::unordered_map otherStats; + std::unordered_map outputStates; + std::unordered_map inputStates; + Timing inputsGetTiming; + Timing outputsSendTiming; + Timing mainLoopTiming; + std::unordered_map otherTimings; - DEPTHAI_SERIALIZE(NodeState, events, timingsByType, outputStats, inputStates, mainLoopStats, otherStats); + DEPTHAI_SERIALIZE(NodeState, events, outputStates, inputStates, inputsGetTiming, outputsSendTiming, mainLoopTiming, otherTimings); }; /** diff --git a/include/depthai/utility/ImageManipImpl.hpp b/include/depthai/utility/ImageManipImpl.hpp index d3c8dea9e..5472ffd4d 100644 --- a/include/depthai/utility/ImageManipImpl.hpp +++ b/include/depthai/utility/ImageManipImpl.hpp @@ -64,44 +64,48 @@ void loop(N& node, bool hasConfig = false; bool needsImage = true; bool skipImage = false; - if(node.inputConfig.getWaitForMessage()) { - pConfig = node.inputConfig.template get(); - hasConfig = true; - if(inImage != nullptr && hasConfig && pConfig->getReusePreviousImage()) { - needsImage = false; - } - skipImage = 
pConfig->getSkipCurrentImage(); - } else { - pConfig = node.inputConfig.template tryGet(); - if(pConfig != nullptr) { - hasConfig = true; - } - } + { + auto blockEvent = node.inputBlockEvent(); - if(needsImage) { - inImage = node.inputImage.template get(); - if(inImage == nullptr) { - logger->warn("No input image, skipping frame"); - continue; - } - if(!hasConfig) { - auto _pConfig = node.inputConfig.template tryGet(); - if(_pConfig != nullptr) { - pConfig = _pConfig; + if(node.inputConfig.getWaitForMessage()) { + pConfig = node.inputConfig.template get(); + hasConfig = true; + if(inImage != nullptr && hasConfig && pConfig->getReusePreviousImage()) { + needsImage = false; + } + skipImage = pConfig->getSkipCurrentImage(); + } else { + pConfig = node.inputConfig.template tryGet(); + if(pConfig != nullptr) { hasConfig = true; } } - if(skipImage) { - continue; + + if(needsImage) { + inImage = node.inputImage.template get(); + if(inImage == nullptr) { + logger->warn("No input image, skipping frame"); + continue; + } + if(!hasConfig) { + auto _pConfig = node.inputConfig.template tryGet(); + if(_pConfig != nullptr) { + pConfig = _pConfig; + hasConfig = true; + } + } + if(skipImage) { + continue; + } } - } - // if has new config, parse and check if any changes - if(hasConfig) { - config = *pConfig; - } - if(!node.inputConfig.getWaitForMessage() && config.getReusePreviousImage()) { - logger->warn("reusePreviousImage is only taken into account when inputConfig is synchronous"); + // if has new config, parse and check if any changes + if(hasConfig) { + config = *pConfig; + } + if(!node.inputConfig.getWaitForMessage() && config.getReusePreviousImage()) { + logger->warn("reusePreviousImage is only taken into account when inputConfig is synchronous"); + } } auto startP = std::chrono::steady_clock::now(); @@ -135,7 +139,11 @@ void loop(N& node, if(!success) { logger->error("Processing failed, potentially unsupported config"); } - node.out.send(outImage); + { + auto blockEvent = node.outputBlockEvent(); + + node.out.send(outImage); + } } else { logger->error( "Output image is bigger ({}B) than maximum frame size specified in properties ({}B) - skipping frame.\nPlease use the setMaxOutputFrameSize " diff --git a/include/depthai/utility/PipelineEventDispatcher.hpp b/include/depthai/utility/PipelineEventDispatcher.hpp index 318c2e809..eda5cb2cd 100644 --- a/include/depthai/utility/PipelineEventDispatcher.hpp +++ b/include/depthai/utility/PipelineEventDispatcher.hpp @@ -1,6 +1,5 @@ #pragma once -#include #include #include #include @@ -33,15 +32,23 @@ class PipelineEventDispatcher : public PipelineEventDispatcherInterface { void setNodeId(int64_t id) override; - void addEvent(const std::string& source, PipelineEvent::Type type) override; - - void startEvent(const std::string& source, - std::optional queueSize = std::nullopt, - std::optional metadata = std::nullopt) override; // Start event with a start and an end - void endEvent(const std::string& source, - std::optional queueSize = std::nullopt, - std::optional metadata = std::nullopt) override; // Stop event with a start and an end - void pingEvent(const std::string& source) override; // Event where stop and start are the same (eg. 
loop) + void startEvent(PipelineEvent::Type type, const std::string& source, std::optional queueSize = std::nullopt) override; + void startInputEvent(const std::string& source, std::optional queueSize = std::nullopt) override; + void startOutputEvent(const std::string& source) override; + void startCustomEvent(const std::string& source) override; + void endEvent(PipelineEvent::Type type, + const std::string& source, + std::optional queueSize = std::nullopt) override; + void endInputEvent(const std::string& source, std::optional queueSize = std::nullopt) override; + void endOutputEvent(const std::string& source) override; + void endCustomEvent(const std::string& source) override; + void pingEvent(PipelineEvent::Type type, const std::string& source) override; + void pingMainLoopEvent() override; + void pingCustomEvent(const std::string& source) override; + BlockPipelineEvent blockEvent(PipelineEvent::Type type, const std::string& source) override; + BlockPipelineEvent inputBlockEvent(const std::string& source = "defaultInputGroup") override; + BlockPipelineEvent outputBlockEvent(const std::string& source = "defaultOutputGroup") override; + BlockPipelineEvent customBlockEvent(const std::string& source) override; }; } // namespace utility diff --git a/include/depthai/utility/PipelineEventDispatcherInterface.hpp b/include/depthai/utility/PipelineEventDispatcherInterface.hpp index e4f6d9858..46c38003d 100644 --- a/include/depthai/utility/PipelineEventDispatcherInterface.hpp +++ b/include/depthai/utility/PipelineEventDispatcherInterface.hpp @@ -9,16 +9,38 @@ namespace utility { class PipelineEventDispatcherInterface { public: + class BlockPipelineEvent { + PipelineEventDispatcherInterface& dispatcher; + PipelineEvent::Type type; + std::string source; + + public: + BlockPipelineEvent(PipelineEventDispatcherInterface& dispatcher, PipelineEvent::Type type, const std::string& source) + : dispatcher(dispatcher), type(type), source(source) { + dispatcher.startEvent(type, source, std::nullopt); + } + ~BlockPipelineEvent() { + dispatcher.endEvent(type, source, std::nullopt); + } + }; + virtual ~PipelineEventDispatcherInterface() = default; virtual void setNodeId(int64_t id) = 0; - virtual void addEvent(const std::string& source, PipelineEvent::Type type) = 0; - virtual void startEvent(const std::string& source, - std::optional queueSize = std::nullopt, - std::optional metadata = std::nullopt) = 0; // Start event with a start and an end - virtual void endEvent(const std::string& source, - std::optional queueSize = std::nullopt, - std::optional metadata = std::nullopt) = 0; // Stop event with a start and an end - virtual void pingEvent(const std::string& source) = 0; // Event where stop and start are the same (eg. 
loop) + virtual void startEvent(PipelineEvent::Type type, const std::string& source, std::optional queueSize = std::nullopt) = 0; + virtual void startInputEvent(const std::string& source, std::optional queueSize = std::nullopt) = 0; + virtual void startOutputEvent(const std::string& source) = 0; + virtual void startCustomEvent(const std::string& source) = 0; + virtual void endEvent(PipelineEvent::Type type, const std::string& source, std::optional queueSize = std::nullopt) = 0; + virtual void endInputEvent(const std::string& source, std::optional queueSize = std::nullopt) = 0; + virtual void endOutputEvent(const std::string& source) = 0; + virtual void endCustomEvent(const std::string& source) = 0; + virtual void pingEvent(PipelineEvent::Type type, const std::string& source) = 0; + virtual void pingMainLoopEvent() = 0; + virtual void pingCustomEvent(const std::string& source) = 0; + virtual BlockPipelineEvent blockEvent(PipelineEvent::Type type, const std::string& source) = 0; + virtual BlockPipelineEvent inputBlockEvent(const std::string& source = "defaultInputGroup") = 0; + virtual BlockPipelineEvent outputBlockEvent(const std::string& source = "defaultOutputGroup") = 0; + virtual BlockPipelineEvent customBlockEvent(const std::string& source); }; } // namespace utility diff --git a/src/pipeline/MessageQueue.cpp b/src/pipeline/MessageQueue.cpp index faee735db..eed60214e 100644 --- a/src/pipeline/MessageQueue.cpp +++ b/src/pipeline/MessageQueue.cpp @@ -22,11 +22,7 @@ MessageQueue::MessageQueue(std::string name, unsigned int maxSize, bool blocking, std::shared_ptr pipelineEventDispatcher) - : queue(maxSize, blocking), name(std::move(name)), pipelineEventDispatcher(pipelineEventDispatcher) { - if(pipelineEventDispatcher) { - pipelineEventDispatcher->addEvent(this->name, PipelineEvent::Type::INPUT); - } -} + : queue(maxSize, blocking), name(std::move(name)), pipelineEventDispatcher(pipelineEventDispatcher) {} MessageQueue::MessageQueue(unsigned int maxSize, bool blocking) : queue(maxSize, blocking) {} diff --git a/src/pipeline/Node.cpp b/src/pipeline/Node.cpp index cac6baa87..49490765d 100644 --- a/src/pipeline/Node.cpp +++ b/src/pipeline/Node.cpp @@ -241,11 +241,11 @@ void Node::Output::send(const std::shared_ptr& msg) { // } // } // } - if(pipelineEventDispatcher) pipelineEventDispatcher->startEvent(getName()); + if(pipelineEventDispatcher) pipelineEventDispatcher->startOutputEvent(getName()); for(auto& messageQueue : connectedInputs) { messageQueue->send(msg); } - if(pipelineEventDispatcher) pipelineEventDispatcher->endEvent(getName()); + if(pipelineEventDispatcher) pipelineEventDispatcher->endOutputEvent(getName()); } bool Node::Output::trySend(const std::shared_ptr& msg) { @@ -265,11 +265,11 @@ bool Node::Output::trySend(const std::shared_ptr& msg) { // } // } // } - if(pipelineEventDispatcher) pipelineEventDispatcher->startEvent(getName()); + if(pipelineEventDispatcher) pipelineEventDispatcher->startOutputEvent(getName()); for(auto& messageQueue : connectedInputs) { success &= messageQueue->trySend(msg); } - if(pipelineEventDispatcher) pipelineEventDispatcher->endEvent(getName()); + if(pipelineEventDispatcher && success) pipelineEventDispatcher->endOutputEvent(getName()); return success; } diff --git a/src/pipeline/ThreadedNode.cpp b/src/pipeline/ThreadedNode.cpp index 242f97db4..b86ad8783 100644 --- a/src/pipeline/ThreadedNode.cpp +++ b/src/pipeline/ThreadedNode.cpp @@ -2,6 +2,9 @@ #include +#include + +#include "depthai/utility/PipelineEventDispatcher.hpp" #include 
"pipeline/ThreadedNodeImpl.hpp" #include "utility/Environment.hpp" #include "utility/ErrorMacros.hpp" @@ -23,7 +26,6 @@ ThreadedNode::ThreadedNode() { void ThreadedNode::initPipelineEventDispatcher(int64_t nodeId) { pipelineEventDispatcher->setNodeId(nodeId); - pipelineEventDispatcher->addEvent("_mainLoop", PipelineEvent::Type::LOOP); } ThreadedNode::~ThreadedNode() = default; @@ -94,8 +96,20 @@ bool ThreadedNode::isRunning() const { } bool ThreadedNode::mainLoop() { - this->pipelineEventDispatcher->pingEvent("_mainLoop"); + this->pipelineEventDispatcher->pingMainLoopEvent(); return isRunning(); } +utility::PipelineEventDispatcherInterface::BlockPipelineEvent ThreadedNode::blockEvent(PipelineEvent::Type type, const std::string& source) { + return pipelineEventDispatcher->blockEvent(type, source); +} +utility::PipelineEventDispatcherInterface::BlockPipelineEvent ThreadedNode::inputBlockEvent(const std::string& source) { + // Just for convenience due to the default source + return blockEvent(PipelineEvent::Type::INPUT_BLOCK, source); +} +utility::PipelineEventDispatcherInterface::BlockPipelineEvent ThreadedNode::outputBlockEvent(const std::string& source) { + // Just for convenience due to the default source + return blockEvent(PipelineEvent::Type::OUTPUT_BLOCK, source); +} + } // namespace dai diff --git a/src/pipeline/node/internal/PipelineEventAggregation.cpp b/src/pipeline/node/internal/PipelineEventAggregation.cpp index 6859e939e..749f46587 100644 --- a/src/pipeline/node/internal/PipelineEventAggregation.cpp +++ b/src/pipeline/node/internal/PipelineEventAggregation.cpp @@ -30,9 +30,10 @@ class NodeEventAggregation { : logger(logger), windowSize(windowSize), eventBatchSize(eventBatchSize), eventsBuffer(windowSize) {} NodeState state; utility::CircularBuffer eventsBuffer; - std::unordered_map>> timingsBufferByType; std::unordered_map>> inputTimingsBuffers; std::unordered_map>> outputTimingsBuffers; + std::unordered_map>> inputGroupTimingsBuffers; + std::unordered_map>> outputGroupTimingsBuffers; std::unique_ptr> mainLoopTimingsBuffer; std::unordered_map>> otherTimingsBuffers; @@ -40,10 +41,14 @@ class NodeEventAggregation { std::unordered_map>> inputFpsBuffers; std::unordered_map>> outputFpsBuffers; + std::unordered_map>> inputGroupFpsBuffers; + std::unordered_map>> outputGroupFpsBuffers; std::unordered_map>> otherFpsBuffers; std::unordered_map> ongoingInputEvents; std::unordered_map> ongoingOutputEvents; + std::unordered_map> ongoingInputGroupEvents; + std::unordered_map> ongoingOutputGroupEvents; std::optional ongoingMainLoopEvent; std::unordered_map> ongoingOtherEvents; @@ -65,6 +70,10 @@ class NodeEventAggregation { return ongoingOutputEvents[event.source]; case PipelineEvent::Type::CUSTOM: return ongoingOtherEvents[event.source]; + case PipelineEvent::Type::INPUT_GROUP: + return ongoingInputGroupEvents[event.source]; + case PipelineEvent::Type::OUTPUT_GROUP: + return ongoingOutputGroupEvents[event.source]; } return ongoingMainLoopEvent; // To silence compiler warning }(); @@ -87,6 +96,16 @@ class NodeEventAggregation { otherTimingsBuffers[event.source] = std::make_unique>(windowSize); } return otherTimingsBuffers[event.source]; + case PipelineEvent::Type::INPUT_GROUP: + if(inputGroupTimingsBuffers.find(event.source) == inputGroupTimingsBuffers.end()) { + inputGroupTimingsBuffers[event.source] = std::make_unique>(windowSize); + } + return inputGroupTimingsBuffers[event.source]; + case PipelineEvent::Type::OUTPUT_GROUP: + if(outputGroupTimingsBuffers.find(event.source) == 
outputGroupTimingsBuffers.end()) { + outputGroupTimingsBuffers[event.source] = std::make_unique>(windowSize); + } + return outputGroupTimingsBuffers[event.source]; } return emptyIntBuffer; // To silence compiler warning }(); @@ -109,6 +128,16 @@ class NodeEventAggregation { otherFpsBuffers[event.source] = std::make_unique>(windowSize); } return otherFpsBuffers[event.source]; + case PipelineEvent::Type::INPUT_GROUP: + if(inputGroupFpsBuffers.find(event.source) == inputGroupFpsBuffers.end()) { + inputGroupFpsBuffers[event.source] = std::make_unique>(windowSize); + } + return inputGroupFpsBuffers[event.source]; + case PipelineEvent::Type::OUTPUT_GROUP: + if(outputGroupFpsBuffers.find(event.source) == outputGroupFpsBuffers.end()) { + outputGroupFpsBuffers[event.source] = std::make_unique>(windowSize); + } + return outputGroupFpsBuffers[event.source]; } return emptyTimeBuffer; // To silence compiler warning }(); @@ -120,10 +149,6 @@ class NodeEventAggregation { eventsBuffer.add(durationEvent); state.events = eventsBuffer.getBuffer(); - if(timingsBufferByType.find(event.type) == timingsBufferByType.end()) { - timingsBufferByType[event.type] = std::make_unique>(windowSize); - } - timingsBufferByType[event.type]->add(durationEvent.durationUs); timingsBuffer->add(durationEvent.durationUs); fpsBuffer->add({durationEvent.startEvent.getTimestamp(), durationEvent.startEvent.getSequenceNum()}); @@ -132,6 +157,7 @@ class NodeEventAggregation { return true; } else { if(ongoingEvent.has_value()) { + // TODO: add ability to wait for multiple events (nn hailo threaded processing time) logger->warn("Ongoing event (seq {}) not finished before new one (seq {}) started. Event source: {}, node {}", ongoingEvent->sequenceNum, event.sequenceNum, @@ -159,6 +185,8 @@ class NodeEventAggregation { return ongoingOtherEvents[event.source]; case PipelineEvent::Type::INPUT: case PipelineEvent::Type::OUTPUT: + case PipelineEvent::Type::INPUT_GROUP: + case PipelineEvent::Type::OUTPUT_GROUP: throw std::runtime_error("INPUT and OUTPUT events should not be pings"); } return ongoingMainLoopEvent; // To silence compiler warning @@ -174,15 +202,14 @@ class NodeEventAggregation { return otherTimingsBuffers[event.source]; case PipelineEvent::Type::INPUT: case PipelineEvent::Type::OUTPUT: + case PipelineEvent::Type::INPUT_GROUP: + case PipelineEvent::Type::OUTPUT_GROUP: throw std::runtime_error("INPUT and OUTPUT events should not be pings"); } return emptyIntBuffer; // To silence compiler warning }(); auto& fpsBuffer = [&]() -> std::unique_ptr>& { switch(event.type) { - case PipelineEvent::Type::INPUT: - case PipelineEvent::Type::OUTPUT: - throw std::runtime_error("INPUT and OUTPUT events should not be pings"); case PipelineEvent::Type::LOOP: break; case PipelineEvent::Type::CUSTOM: @@ -190,6 +217,11 @@ class NodeEventAggregation { otherFpsBuffers[event.source] = std::make_unique>(windowSize); } return otherFpsBuffers[event.source]; + case PipelineEvent::Type::INPUT: + case PipelineEvent::Type::OUTPUT: + case PipelineEvent::Type::INPUT_GROUP: + case PipelineEvent::Type::OUTPUT_GROUP: + throw std::runtime_error("INPUT and OUTPUT events should not be pings"); } return emptyTimeBuffer; // To silence compiler warning }(); @@ -201,13 +233,9 @@ class NodeEventAggregation { eventsBuffer.add(durationEvent); state.events = eventsBuffer.getBuffer(); - if(timingsBufferByType.find(event.type) == timingsBufferByType.end()) { - timingsBufferByType[event.type] = std::make_unique>(windowSize); - } if(timingsBuffer == nullptr) { timingsBuffer = 
std::make_unique>(windowSize); } - timingsBufferByType[event.type]->add(durationEvent.durationUs); timingsBuffer->add(durationEvent.durationUs); if(fpsBuffer) fpsBuffer->add({durationEvent.startEvent.getTimestamp(), durationEvent.startEvent.getSequenceNum()}); @@ -285,9 +313,7 @@ class NodeEventAggregation { } else { addedEvent = updateIntervalBuffers(event); } - if(addedEvent && ++count % eventBatchSize == 0) { - // By type - updateTimingStats(state.timingsByType[event.type], *timingsBufferByType[event.type]); + if(addedEvent /* && ++count % eventBatchSize == 0 */) { // TODO // By instance switch(event.type) { case PipelineEvent::Type::CUSTOM: @@ -306,9 +332,17 @@ class NodeEventAggregation { updateTimingStats(state.outputStats[event.source], *outputTimingsBuffers[event.source]); updateFpsStats(state.outputStats[event.source], *outputFpsBuffers[event.source]); break; + case PipelineEvent::Type::INPUT_GROUP: + updateTimingStats(state.inputGroupStats[event.source], *inputGroupTimingsBuffers[event.source]); + updateFpsStats(state.inputGroupStats[event.source], *inputGroupFpsBuffers[event.source]); + break; + case PipelineEvent::Type::OUTPUT_GROUP: + updateTimingStats(state.outputGroupStats[event.source], *outputGroupTimingsBuffers[event.source]); + updateFpsStats(state.outputGroupStats[event.source], *outputGroupFpsBuffers[event.source]); + break; } } - if(event.type == PipelineEvent::Type::INPUT && event.interval == PipelineEvent::Interval::END && ++count % eventBatchSize == 0) { + if(event.type == PipelineEvent::Type::INPUT && event.interval == PipelineEvent::Interval::END /* && ++count % eventBatchSize == 0 */) { // TODO auto& qStats = state.inputStates[event.source].queueStats; auto& qBuffer = *inputQueueSizesBuffers[event.source]; qStats.maxQueued = std::max(qStats.maxQueued, *event.queueSize); @@ -339,8 +373,7 @@ class PipelineEventHandler { std::shared_mutex mutex; public: - PipelineEventHandler( - Node::InputMap* inputs, uint32_t aggregationWindowSize, uint32_t eventBatchSize, std::shared_ptr logger) + PipelineEventHandler(Node::InputMap* inputs, uint32_t aggregationWindowSize, uint32_t eventBatchSize, std::shared_ptr logger) : inputs(inputs), aggregationWindowSize(aggregationWindowSize), eventBatchSize(eventBatchSize), logger(logger) {} void threadedRun() { while(running) { diff --git a/src/utility/PipelineEventDispatcher.cpp b/src/utility/PipelineEventDispatcher.cpp index 33bff7a04..bc19992f8 100644 --- a/src/utility/PipelineEventDispatcher.cpp +++ b/src/utility/PipelineEventDispatcher.cpp @@ -1,11 +1,33 @@ #include "depthai/utility/PipelineEventDispatcher.hpp" #include -#include namespace dai { namespace utility { +std::string typeToString(PipelineEvent::Type type) { + switch(type) { + case PipelineEvent::Type::CUSTOM: + return "CUSTOM"; + case PipelineEvent::Type::LOOP: + return "LOOP"; + case PipelineEvent::Type::INPUT: + return "INPUT"; + case PipelineEvent::Type::OUTPUT: + return "OUTPUT"; + case PipelineEvent::Type::INPUT_BLOCK: + return "INPUT_BLOCK"; + case PipelineEvent::Type::OUTPUT_BLOCK: + return "OUTPUT_BLOCK"; + default: + return "UNKNOWN"; + } +} + +std::string makeKey(PipelineEvent::Type type, const std::string& source) { + return typeToString(type) + "#" + source; +} + void PipelineEventDispatcher::checkNodeId() { if(nodeId == -1) { throw std::runtime_error("Node ID not set on PipelineEventDispatcher"); @@ -14,24 +36,10 @@ void PipelineEventDispatcher::checkNodeId() { void PipelineEventDispatcher::setNodeId(int64_t id) { nodeId = id; } -void 
PipelineEventDispatcher::addEvent(const std::string& source, PipelineEvent::Type type) { - if(!source.empty()) { - if(events.find(source) != events.end()) { - throw std::runtime_error("Event with name '" + source + "' already exists"); - } - PipelineEvent event; - event.type = type; - event.source = source; - events[source] = {event, false}; - } -} -void PipelineEventDispatcher::startEvent(const std::string& source, std::optional queueSize, std::optional metadata) { +void PipelineEventDispatcher::startEvent(PipelineEvent::Type type, const std::string& source, std::optional queueSize) { // TODO add mutex checkNodeId(); - if(events.find(source) == events.end()) { - throw std::runtime_error("Event with name " + source + " does not exist"); - } - auto& event = events[source]; + auto& event = events[makeKey(type, source)]; if(event.ongoing) { throw std::runtime_error("Event with name " + source + " is already ongoing"); } @@ -39,25 +47,30 @@ void PipelineEventDispatcher::startEvent(const std::string& source, std::optiona event.event.tsDevice = event.event.ts; ++event.event.sequenceNum; event.event.nodeId = nodeId; - event.event.metadata = std::move(metadata); event.event.queueSize = std::move(queueSize); event.event.interval = PipelineEvent::Interval::START; // type and source are already set event.ongoing = true; if(out) { - out->send(std::make_shared(event.event)); + out->send(std::make_shared(event)); } } -void PipelineEventDispatcher::endEvent(const std::string& source, std::optional queueSize, std::optional metadata) { +void PipelineEventDispatcher::startInputEvent(const std::string& source, std::optional queueSize) { + startEvent(PipelineEvent::Type::INPUT, source, std::move(queueSize)); +} +void PipelineEventDispatcher::startOutputEvent(const std::string& source) { + startEvent(PipelineEvent::Type::OUTPUT, source, std::nullopt); +} +void PipelineEventDispatcher::startCustomEvent(const std::string& source) { + startEvent(PipelineEvent::Type::CUSTOM, source, std::nullopt); +} +void PipelineEventDispatcher::endEvent(PipelineEvent::Type type, const std::string& source, std::optional queueSize) { // TODO add mutex checkNodeId(); auto now = std::chrono::steady_clock::now(); - if(events.find(source) == events.end()) { - throw std::runtime_error("Event with name " + source + " does not exist"); - } - auto& event = events[source]; + auto& event = events[makeKey(type, source)]; if(!event.ongoing) { throw std::runtime_error("Event with name " + source + " has not been started"); } @@ -65,7 +78,6 @@ void PipelineEventDispatcher::endEvent(const std::string& source, std::optional< event.event.setTimestamp(now); event.event.tsDevice = event.event.ts; event.event.nodeId = nodeId; - event.event.metadata = std::move(metadata); event.event.queueSize = std::move(queueSize); event.event.interval = PipelineEvent::Interval::END; // type and source are already set @@ -75,18 +87,23 @@ void PipelineEventDispatcher::endEvent(const std::string& source, std::optional< out->send(std::make_shared(event.event)); } - event.event.metadata = std::nullopt; event.event.queueSize = std::nullopt; } -void PipelineEventDispatcher::pingEvent(const std::string& source) { +void PipelineEventDispatcher::endInputEvent(const std::string& source, std::optional queueSize) { + endEvent(PipelineEvent::Type::INPUT, source, std::move(queueSize)); +} +void PipelineEventDispatcher::endOutputEvent(const std::string& source) { + endEvent(PipelineEvent::Type::OUTPUT, source, std::nullopt); +} +void PipelineEventDispatcher::endCustomEvent(const 
std::string& source) { + endEvent(PipelineEvent::Type::CUSTOM, source, std::nullopt); +} +void PipelineEventDispatcher::pingEvent(PipelineEvent::Type type, const std::string& source) { // TODO add mutex checkNodeId(); auto now = std::chrono::steady_clock::now(); - if(events.find(source) == events.end()) { - throw std::runtime_error("Event with name " + source + " does not exist"); - } - auto& event = events[source]; + auto& event = events[makeKey(type, source)]; if(event.ongoing) { throw std::runtime_error("Event with name " + source + " is already ongoing"); } @@ -101,6 +118,27 @@ void PipelineEventDispatcher::pingEvent(const std::string& source) { out->send(std::make_shared(event.event)); } } +void PipelineEventDispatcher::pingMainLoopEvent() { + pingEvent(PipelineEvent::Type::LOOP, "_mainLoop"); +} +void PipelineEventDispatcher::pingCustomEvent(const std::string& source) { + pingEvent(PipelineEvent::Type::CUSTOM, source); +} +PipelineEventDispatcher::BlockPipelineEvent PipelineEventDispatcher::blockEvent(PipelineEvent::Type type, const std::string& source) { + return BlockPipelineEvent(*this, type, source); +} +PipelineEventDispatcher::BlockPipelineEvent PipelineEventDispatcher::inputBlockEvent(const std::string& source) { + // For convenience due to the default source + return blockEvent(PipelineEvent::Type::INPUT_BLOCK, source); +} +PipelineEventDispatcher::BlockPipelineEvent PipelineEventDispatcher::outputBlockEvent(const std::string& source) { + // For convenience due to the default source + return blockEvent(PipelineEvent::Type::OUTPUT_BLOCK, source); +} +PipelineEventDispatcher::BlockPipelineEvent PipelineEventDispatcher::customBlockEvent(const std::string& source) { + // For convenience due to the default source + return blockEvent(PipelineEvent::Type::CUSTOM, source); +} } // namespace utility } // namespace dai From 36e917aa712dae394fbb43f4a8bf42c5148badd1 Mon Sep 17 00:00:00 2001 From: asahtik Date: Wed, 8 Oct 2025 13:57:17 +0200 Subject: [PATCH 23/40] Do not require adding events, node / input states, other improvements --- .../datatype/PipelineEventBindings.cpp | 3 +- .../datatype/PipelineStateBindings.cpp | 52 ++++-- .../PipelineEventDispatcherBindings.cpp | 10 +- include/depthai/pipeline/Node.hpp | 3 - include/depthai/pipeline/Pipeline.hpp | 8 +- .../pipeline/datatype/PipelineState.hpp | 2 +- include/depthai/utility/LockingQueue.hpp | 28 ++- .../utility/PipelineEventDispatcher.hpp | 1 + .../PipelineEventDispatcherInterface.hpp | 3 +- src/pipeline/MessageQueue.cpp | 30 +++- .../internal/PipelineEventAggregation.cpp | 170 +++++++++--------- src/utility/PipelineEventDispatcher.cpp | 34 +++- 12 files changed, 226 insertions(+), 118 deletions(-) diff --git a/bindings/python/src/pipeline/datatype/PipelineEventBindings.cpp b/bindings/python/src/pipeline/datatype/PipelineEventBindings.cpp index f2d028be6..5920810db 100644 --- a/bindings/python/src/pipeline/datatype/PipelineEventBindings.cpp +++ b/bindings/python/src/pipeline/datatype/PipelineEventBindings.cpp @@ -45,8 +45,7 @@ void bind_pipelineevent(pybind11::module& m, void* pCallstack) { pipelineEvent.def(py::init<>()) .def("__repr__", &PipelineEvent::str) .def_readwrite("nodeId", &PipelineEvent::nodeId, DOC(dai, PipelineEvent, nodeId)) - .def_readwrite("metadata", &PipelineEvent::metadata, DOC(dai, PipelineEvent, metadata)) - .def_readwrite("queueSize", &PipelineEvent::metadata, DOC(dai, PipelineEvent, metadata)) + .def_readwrite("queueSize", &PipelineEvent::queueSize, DOC(dai, PipelineEvent, queueSize)) 
.def_readwrite("interval", &PipelineEvent::interval, DOC(dai, PipelineEvent, interval)) .def_readwrite("type", &PipelineEvent::type, DOC(dai, PipelineEvent, type)) .def_readwrite("source", &PipelineEvent::source, DOC(dai, PipelineEvent, source)) diff --git a/bindings/python/src/pipeline/datatype/PipelineStateBindings.cpp b/bindings/python/src/pipeline/datatype/PipelineStateBindings.cpp index 9f40eaf72..0a22611f4 100644 --- a/bindings/python/src/pipeline/datatype/PipelineStateBindings.cpp +++ b/bindings/python/src/pipeline/datatype/PipelineStateBindings.cpp @@ -1,8 +1,6 @@ #include -#include #include "DatatypeBindings.hpp" -#include "pipeline/CommonBindings.hpp" // depthai #include "depthai/pipeline/datatype/PipelineState.hpp" @@ -19,10 +17,25 @@ void bind_pipelinestate(pybind11::module& m, void* pCallstack) { py::class_ nodeState(m, "NodeState", DOC(dai, NodeState)); py::class_ durationEvent(nodeState, "DurationEvent", DOC(dai, NodeState, DurationEvent)); py::class_ nodeStateTimingStats(nodeState, "TimingStats", DOC(dai, NodeState, TimingStats)); + py::class_ nodeStateTiming(nodeState, "Timing", DOC(dai, NodeState, Timing)); py::class_ nodeStateQueueStats(nodeState, "QueueStats", DOC(dai, NodeState, QueueStats)); - py::class_ nodeStateQueueState(nodeState, "QueueState", DOC(dai, NodeState, QueueState)); + py::class_ nodeStateInputQueueState(nodeState, "InputQueueState", DOC(dai, NodeState, InputQueueState)); + py::class_ nodeStateOutputQueueState(nodeState, "OutputQueueState", DOC(dai, NodeState, OutputQueueState)); py::class_, Buffer, std::shared_ptr> pipelineState(m, "PipelineState", DOC(dai, PipelineState)); + py::enum_(nodeStateInputQueueState, "State", DOC(dai, NodeState, InputQueueState, State)) + .value("IDLE", NodeState::InputQueueState::State::IDLE) + .value("WAITING", NodeState::InputQueueState::State::WAITING) + .value("BLOCKED", NodeState::InputQueueState::State::BLOCKED); + py::enum_(nodeStateOutputQueueState, "State", DOC(dai, NodeState, OutputQueueState, State)) + .value("IDLE", NodeState::OutputQueueState::State::IDLE) + .value("SENDING", NodeState::OutputQueueState::State::SENDING); + py::enum_(nodeState, "State", DOC(dai, NodeState, State)) + .value("IDLE", NodeState::State::IDLE) + .value("GETTING_INPUTS", NodeState::State::GETTING_INPUTS) + .value("PROCESSING", NodeState::State::PROCESSING) + .value("SENDING_OUTPUTS", NodeState::State::SENDING_OUTPUTS); + /////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////// @@ -52,6 +65,11 @@ void bind_pipelinestate(pybind11::module& m, void* pCallstack) { .def_readwrite("maxMicrosRecent", &NodeState::TimingStats::maxMicrosRecent, DOC(dai, NodeState, TimingStats, maxMicrosRecent)) .def_readwrite("medianMicrosRecent", &NodeState::TimingStats::medianMicrosRecent, DOC(dai, NodeState, TimingStats, medianMicrosRecent)); + nodeStateTiming.def(py::init<>()) + .def("__repr__", &NodeState::Timing::str) + .def_readwrite("fps", &NodeState::Timing::fps, DOC(dai, NodeState, Timing, fps)) + .def_readwrite("durationStats", &NodeState::Timing::durationStats, DOC(dai, NodeState, Timing, durationStats)); + nodeStateQueueStats.def(py::init<>()) .def("__repr__", &NodeState::QueueStats::str) .def_readwrite("maxQueued", &NodeState::QueueStats::maxQueued, DOC(dai, NodeState, QueueStats, maxQueued)) @@ -59,22 +77,28 @@ void bind_pipelinestate(pybind11::module& m, void* pCallstack) { 
.def_readwrite("maxQueuedRecent", &NodeState::QueueStats::maxQueuedRecent, DOC(dai, NodeState, QueueStats, maxQueuedRecent)) .def_readwrite("medianQueuedRecent", &NodeState::QueueStats::medianQueuedRecent, DOC(dai, NodeState, QueueStats, medianQueuedRecent)); - nodeStateQueueState.def(py::init<>()) - .def("__repr__", &NodeState::QueueState::str) - .def_readwrite("waiting", &NodeState::QueueState::waiting, DOC(dai, NodeState, QueueState, waiting)) - .def_readwrite("numQueued", &NodeState::QueueState::numQueued, DOC(dai, NodeState, QueueState, numQueued)) - .def_readwrite("timingStats", &NodeState::QueueState::timingStats, DOC(dai, NodeState, QueueState, timingStats)) - .def_readwrite("queueStats", &NodeState::QueueState::queueStats, DOC(dai, NodeState, QueueState, queueStats)); + nodeStateInputQueueState.def(py::init<>()) + .def("__repr__", &NodeState::InputQueueState::str) + .def_readwrite("state", &NodeState::InputQueueState::state, DOC(dai, NodeState, InputQueueState, state)) + .def_readwrite("numQueued", &NodeState::InputQueueState::numQueued, DOC(dai, NodeState, InputQueueState, numQueued)) + .def_readwrite("timing", &NodeState::InputQueueState::timing, DOC(dai, NodeState, InputQueueState, timing)) + .def_readwrite("queueStats", &NodeState::InputQueueState::queueStats, DOC(dai, NodeState, InputQueueState, queueStats)); + + nodeStateOutputQueueState.def(py::init<>()) + .def("__repr__", &NodeState::OutputQueueState::str) + .def_readwrite("state", &NodeState::OutputQueueState::state, DOC(dai, NodeState, OutputQueueState, state)) + .def_readwrite("timing", &NodeState::OutputQueueState::timing, DOC(dai, NodeState, OutputQueueState, timing)); nodeState.def(py::init<>()) .def("__repr__", &NodeState::str) + .def_readwrite("state", &NodeState::state, DOC(dai, NodeState, state)) .def_readwrite("events", &NodeState::events, DOC(dai, NodeState, events)) .def_readwrite("inputStates", &NodeState::inputStates, DOC(dai, NodeState, inputStates)) - .def_readwrite("outputStats", &NodeState::outputStats, DOC(dai, NodeState, outputStats)) - .def_readwrite("inputGroupStats", &NodeState::inputGroupStats, DOC(dai, NodeState, inputGroupStats)) - .def_readwrite("outputGroupStats", &NodeState::outputGroupStats, DOC(dai, NodeState, outputGroupStats)) - .def_readwrite("mainLoopStats", &NodeState::mainLoopStats, DOC(dai, NodeState, mainLoopStats)) - .def_readwrite("otherStats", &NodeState::otherStats, DOC(dai, NodeState, otherStats)); + .def_readwrite("outputStates", &NodeState::outputStates, DOC(dai, NodeState, outputStates)) + .def_readwrite("inputsGetTiming", &NodeState::inputsGetTiming, DOC(dai, NodeState, inputsGetTiming)) + .def_readwrite("outputsSendTiming", &NodeState::outputsSendTiming, DOC(dai, NodeState, outputsSendTiming)) + .def_readwrite("mainLoopTiming", &NodeState::mainLoopTiming, DOC(dai, NodeState, mainLoopTiming)) + .def_readwrite("otherTimings", &NodeState::otherTimings, DOC(dai, NodeState, otherTimings)); // Message pipelineState.def(py::init<>()) diff --git a/bindings/python/src/utility/PipelineEventDispatcherBindings.cpp b/bindings/python/src/utility/PipelineEventDispatcherBindings.cpp index 7ce1fd934..5fccc0505 100644 --- a/bindings/python/src/utility/PipelineEventDispatcherBindings.cpp +++ b/bindings/python/src/utility/PipelineEventDispatcherBindings.cpp @@ -21,8 +21,10 @@ void PipelineEventDispatcherBindings::bind(pybind11::module& m, void* pCallstack pipelineEventDispatcher .def(py::init(), py::arg("output")) .def("setNodeId", &PipelineEventDispatcher::setNodeId, py::arg("id"), DOC(dai, 
utility, PipelineEventDispatcher, setNodeId)) - .def("addEvent", &PipelineEventDispatcher::addEvent, py::arg("source"), py::arg("type"), DOC(dai, utility, PipelineEventDispatcher, addEvent)) - .def("startEvent", &PipelineEventDispatcher::startEvent, py::arg("source"), py::arg("queueSize") = std::nullopt, py::arg("metadata") = std::nullopt, DOC(dai, utility, PipelineEventDispatcher, startEvent)) - .def("endEvent", &PipelineEventDispatcher::endEvent, py::arg("source"), py::arg("queueSize") = std::nullopt, py::arg("metadata") = std::nullopt, DOC(dai, utility, PipelineEventDispatcher, endEvent)) - .def("pingEvent", &PipelineEventDispatcher::pingEvent, py::arg("source"), DOC(dai, utility, PipelineEventDispatcher, pingEvent)); + .def("startCustomEvent", &PipelineEventDispatcher::startCustomEvent, py::arg("source"), DOC(dai, utility, PipelineEventDispatcher, startCustomEvent)) + .def("endCustomEvent", &PipelineEventDispatcher::endCustomEvent, py::arg("source"), DOC(dai, utility, PipelineEventDispatcher, endCustomEvent)) + .def("pingCustomEvent", &PipelineEventDispatcher::pingCustomEvent, py::arg("source"), DOC(dai, utility, PipelineEventDispatcher, pingCustomEvent)) + .def("inputBlockEvent", &PipelineEventDispatcher::inputBlockEvent, py::arg("source") = "defaultInputGroup", DOC(dai, utility, PipelineEventDispatcher, inputBlockEvent)) + .def("outputBlockEvent", &PipelineEventDispatcher::outputBlockEvent, py::arg("source") = "defaultOutputGroup", DOC(dai, utility, PipelineEventDispatcher, outputBlockEvent)) + .def("customBlockEvent", &PipelineEventDispatcher::customBlockEvent, py::arg("source"), DOC(dai, utility, PipelineEventDispatcher, customBlockEvent)); } diff --git a/include/depthai/pipeline/Node.hpp b/include/depthai/pipeline/Node.hpp index d8f1833ed..85d826a3f 100644 --- a/include/depthai/pipeline/Node.hpp +++ b/include/depthai/pipeline/Node.hpp @@ -145,9 +145,6 @@ class Node : public std::enable_shared_from_this { if(getName().empty()) { setName(par.createUniqueOutputName()); } - if(pipelineEventDispatcher && getName() != "pipelineEventOutput") { - pipelineEventDispatcher->addEvent(getName(), PipelineEvent::Type::OUTPUT); - } } Node& getParent() { diff --git a/include/depthai/pipeline/Pipeline.hpp b/include/depthai/pipeline/Pipeline.hpp index 1739a7907..30d1a209a 100644 --- a/include/depthai/pipeline/Pipeline.hpp +++ b/include/depthai/pipeline/Pipeline.hpp @@ -106,7 +106,7 @@ class NodeStateApi> { // TODO send and get return {}; } - std::unordered_map> inputs() { + std::unordered_map> inputs() { PipelineEventAggregationConfig cfg; cfg.repeat = false; cfg.setTimestamp(std::chrono::steady_clock::now()); @@ -154,7 +154,7 @@ class NodeStateApi { std::unordered_map outputs() { return NodeStateApi>({nodeId}, pipelineStateOut, pipelineStateRequest).outputs()[nodeId]; } - std::unordered_map inputs() { + std::unordered_map inputs() { return NodeStateApi>({nodeId}, pipelineStateOut, pipelineStateRequest).inputs()[nodeId]; } std::unordered_map otherStats() { @@ -196,7 +196,7 @@ class NodeStateApi { // TODO send and get return {}; } - std::unordered_map inputs(const std::vector& inputNames) { + std::unordered_map inputs(const std::vector& inputNames) { PipelineEventAggregationConfig cfg; cfg.repeat = false; cfg.setTimestamp(std::chrono::steady_clock::now()); @@ -208,7 +208,7 @@ class NodeStateApi { // TODO send and get return {}; } - NodeState::QueueState inputs(const std::string& inputName) { + NodeState::InputQueueState inputs(const std::string& inputName) { PipelineEventAggregationConfig cfg; 
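        // Query pattern shared by the NodeStateApi getters: build a one-shot
        // (repeat = false) PipelineEventAggregationConfig scoped to this node and
        // input, send it on the request queue, and read back the matching
        // PipelineState. The "TODO send and get" bodies are expected to follow
        // roughly this shape (hypothetical sketch; the queue member names below
        // mirror the constructor arguments and may differ in the final code):
        //
        //   pipelineStateRequest->send(std::make_shared<PipelineEventAggregationConfig>(cfg));
        //   auto state = pipelineStateOut->get<PipelineState>();
        //   return state->nodeStates.at(nodeId).inputStates.at(inputName);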
cfg.repeat = false; cfg.setTimestamp(std::chrono::steady_clock::now()); diff --git a/include/depthai/pipeline/datatype/PipelineState.hpp b/include/depthai/pipeline/datatype/PipelineState.hpp index e30833c92..ef0d66851 100644 --- a/include/depthai/pipeline/datatype/PipelineState.hpp +++ b/include/depthai/pipeline/datatype/PipelineState.hpp @@ -52,7 +52,7 @@ class NodeState { struct OutputQueueState { enum class State : std::int32_t { IDLE = 0, - BLOCKED = 1 + SENDING = 1 } state = State::IDLE; Timing timing; DEPTHAI_SERIALIZE(OutputQueueState, state, timing); diff --git a/include/depthai/utility/LockingQueue.hpp b/include/depthai/utility/LockingQueue.hpp index f4a859396..54b076153 100644 --- a/include/depthai/utility/LockingQueue.hpp +++ b/include/depthai/utility/LockingQueue.hpp @@ -18,6 +18,8 @@ namespace dai { // Mutex& operator=(Mutex&&) = delete; // }; +enum class LockingQueueState { OK, BLOCKED, CANCELLED }; + template class LockingQueue { public: @@ -149,7 +151,7 @@ class LockingQueue { return true; } - bool push(T const& data) { + bool push(T const& data, std::function callback = [](LockingQueueState) {}) { { std::unique_lock lock(guard); if(maxSize == 0) { @@ -166,6 +168,9 @@ class LockingQueue { queue.pop(); } } else { + if(queue.size() >= maxSize) { + callback(LockingQueueState::BLOCKED); + } signalPop.wait(lock, [this]() { return queue.size() < maxSize || destructed; }); if(destructed) return false; } @@ -176,7 +181,7 @@ class LockingQueue { return true; } - bool push(T&& data) { + bool push(T&& data, std::function callback = [](LockingQueueState) {}) { { std::unique_lock lock(guard); if(maxSize == 0) { @@ -193,6 +198,9 @@ class LockingQueue { queue.pop(); } } else { + if(queue.size() >= maxSize) { + callback(LockingQueueState::BLOCKED); + } signalPop.wait(lock, [this]() { return queue.size() < maxSize || destructed; }); if(destructed) return false; } @@ -204,7 +212,7 @@ class LockingQueue { } template - bool tryWaitAndPush(T const& data, std::chrono::duration timeout) { + bool tryWaitAndPush(T const& data, std::chrono::duration timeout, std::function callback = [](LockingQueueState) {}) { { std::unique_lock lock(guard); if(maxSize == 0) { @@ -221,8 +229,14 @@ class LockingQueue { queue.pop(); } } else { + if(queue.size() >= maxSize) { + callback(LockingQueueState::BLOCKED); + } // First checks predicate, then waits bool pred = signalPop.wait_for(lock, timeout, [this]() { return queue.size() < maxSize || destructed; }); + if(!pred) { + callback(LockingQueueState::CANCELLED); + } if(!pred) return false; if(destructed) return false; } @@ -234,7 +248,7 @@ class LockingQueue { } template - bool tryWaitAndPush(T&& data, std::chrono::duration timeout) { + bool tryWaitAndPush(T&& data, std::chrono::duration timeout, std::function callback = [](LockingQueueState) {}) { { std::unique_lock lock(guard); if(maxSize == 0) { @@ -252,7 +266,13 @@ class LockingQueue { } } else { // First checks predicate, then waits + if(queue.size() >= maxSize) { + callback(LockingQueueState::BLOCKED); + } bool pred = signalPop.wait_for(lock, timeout, [this]() { return queue.size() < maxSize || destructed; }); + if(!pred) { + callback(LockingQueueState::CANCELLED); + } if(!pred) return false; if(destructed) return false; } diff --git a/include/depthai/utility/PipelineEventDispatcher.hpp b/include/depthai/utility/PipelineEventDispatcher.hpp index eda5cb2cd..1ac2fe8a9 100644 --- a/include/depthai/utility/PipelineEventDispatcher.hpp +++ b/include/depthai/utility/PipelineEventDispatcher.hpp @@ -45,6 +45,7 @@ class 
PipelineEventDispatcher : public PipelineEventDispatcherInterface { void pingEvent(PipelineEvent::Type type, const std::string& source) override; void pingMainLoopEvent() override; void pingCustomEvent(const std::string& source) override; + void pingInputEvent(const std::string& source, std::optional queueSize = std::nullopt) override; BlockPipelineEvent blockEvent(PipelineEvent::Type type, const std::string& source) override; BlockPipelineEvent inputBlockEvent(const std::string& source = "defaultInputGroup") override; BlockPipelineEvent outputBlockEvent(const std::string& source = "defaultOutputGroup") override; diff --git a/include/depthai/utility/PipelineEventDispatcherInterface.hpp b/include/depthai/utility/PipelineEventDispatcherInterface.hpp index 46c38003d..bf2f1d970 100644 --- a/include/depthai/utility/PipelineEventDispatcherInterface.hpp +++ b/include/depthai/utility/PipelineEventDispatcherInterface.hpp @@ -37,10 +37,11 @@ class PipelineEventDispatcherInterface { virtual void pingEvent(PipelineEvent::Type type, const std::string& source) = 0; virtual void pingMainLoopEvent() = 0; virtual void pingCustomEvent(const std::string& source) = 0; + virtual void pingInputEvent(const std::string& source, std::optional queueSize = std::nullopt) = 0; virtual BlockPipelineEvent blockEvent(PipelineEvent::Type type, const std::string& source) = 0; virtual BlockPipelineEvent inputBlockEvent(const std::string& source = "defaultInputGroup") = 0; virtual BlockPipelineEvent outputBlockEvent(const std::string& source = "defaultOutputGroup") = 0; - virtual BlockPipelineEvent customBlockEvent(const std::string& source); + virtual BlockPipelineEvent customBlockEvent(const std::string& source) = 0; }; } // namespace utility diff --git a/src/pipeline/MessageQueue.cpp b/src/pipeline/MessageQueue.cpp index eed60214e..409f4da61 100644 --- a/src/pipeline/MessageQueue.cpp +++ b/src/pipeline/MessageQueue.cpp @@ -117,7 +117,20 @@ void MessageQueue::send(const std::shared_ptr& msg) { throw QueueException(CLOSED_QUEUE_MESSAGE); } callCallbacks(msg); - auto queueNotClosed = queue.push(msg); + auto queueNotClosed = queue.push(msg, [&](LockingQueueState state) { + if(pipelineEventDispatcher) { + switch(state) { + case LockingQueueState::BLOCKED: + pipelineEventDispatcher->pingInputEvent(name, -1); + break; + case LockingQueueState::CANCELLED: + pipelineEventDispatcher->pingInputEvent(name, -2); + break; + case LockingQueueState::OK: + break; + } + } + }); if(!queueNotClosed) throw QueueException(CLOSED_QUEUE_MESSAGE); } @@ -127,7 +140,20 @@ bool MessageQueue::send(const std::shared_ptr& msg, std::chrono::mill if(queue.isDestroyed()) { throw QueueException(CLOSED_QUEUE_MESSAGE); } - return queue.tryWaitAndPush(msg, timeout); + return queue.tryWaitAndPush(msg, timeout, [&](LockingQueueState state) { + if(pipelineEventDispatcher) { + switch(state) { + case LockingQueueState::BLOCKED: + pipelineEventDispatcher->pingInputEvent(name, -1); + break; + case LockingQueueState::CANCELLED: + pipelineEventDispatcher->pingInputEvent(name, -2); + break; + case LockingQueueState::OK: + break; + } + } + }); } bool MessageQueue::trySend(const std::shared_ptr& msg) { diff --git a/src/pipeline/node/internal/PipelineEventAggregation.cpp b/src/pipeline/node/internal/PipelineEventAggregation.cpp index 749f46587..2372e572f 100644 --- a/src/pipeline/node/internal/PipelineEventAggregation.cpp +++ b/src/pipeline/node/internal/PipelineEventAggregation.cpp @@ -32,8 +32,8 @@ class NodeEventAggregation { utility::CircularBuffer eventsBuffer; 
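    // Per-source sliding windows: a window of durations (microseconds) feeds the
    // TimingStats aggregation, and a window of (start timestamp, sequence number)
    // pairs feeds the fps field of NodeState::Timing, both aggregated by the
    // updateTimingStats / updateFpsStats helpers used later in this file.
    // A rough sketch of turning such a window into an FPS figure, assuming
    // getBuffer() returns the buffered samples in insertion order:
    //
    //   auto samples = (*inputFpsBuffers["in"]).getBuffer();
    //   auto span = samples.back().first - samples.front().first;   // steady_clock duration
    //   float fps = span.count() > 0
    //                   ? (samples.size() - 1) / std::chrono::duration<float>(span).count()
    //                   : 0.0f;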
std::unordered_map>> inputTimingsBuffers; std::unordered_map>> outputTimingsBuffers; - std::unordered_map>> inputGroupTimingsBuffers; - std::unordered_map>> outputGroupTimingsBuffers; + std::unique_ptr> inputsGetTimingsBuffer; + std::unique_ptr> outputsSendTimingsBuffer; std::unique_ptr> mainLoopTimingsBuffer; std::unordered_map>> otherTimingsBuffers; @@ -41,14 +41,14 @@ class NodeEventAggregation { std::unordered_map>> inputFpsBuffers; std::unordered_map>> outputFpsBuffers; - std::unordered_map>> inputGroupFpsBuffers; - std::unordered_map>> outputGroupFpsBuffers; + std::unique_ptr> inputsGetFpsBuffer; + std::unique_ptr> outputsSendFpsBuffer; std::unordered_map>> otherFpsBuffers; std::unordered_map> ongoingInputEvents; std::unordered_map> ongoingOutputEvents; - std::unordered_map> ongoingInputGroupEvents; - std::unordered_map> ongoingOutputGroupEvents; + std::optional ongoingGetInputsEvent; + std::optional ongoingSendOutputsEvent; std::optional ongoingMainLoopEvent; std::unordered_map> ongoingOtherEvents; @@ -70,10 +70,10 @@ class NodeEventAggregation { return ongoingOutputEvents[event.source]; case PipelineEvent::Type::CUSTOM: return ongoingOtherEvents[event.source]; - case PipelineEvent::Type::INPUT_GROUP: - return ongoingInputGroupEvents[event.source]; - case PipelineEvent::Type::OUTPUT_GROUP: - return ongoingOutputGroupEvents[event.source]; + case PipelineEvent::Type::INPUT_BLOCK: + return ongoingGetInputsEvent; + case PipelineEvent::Type::OUTPUT_BLOCK: + return ongoingSendOutputsEvent; } return ongoingMainLoopEvent; // To silence compiler warning }(); @@ -82,30 +82,15 @@ class NodeEventAggregation { case PipelineEvent::Type::LOOP: throw std::runtime_error("LOOP event should not be an interval"); case PipelineEvent::Type::INPUT: - if(inputTimingsBuffers.find(event.source) == inputTimingsBuffers.end()) { - inputTimingsBuffers[event.source] = std::make_unique>(windowSize); - } return inputTimingsBuffers[event.source]; case PipelineEvent::Type::OUTPUT: - if(outputTimingsBuffers.find(event.source) == outputTimingsBuffers.end()) { - outputTimingsBuffers[event.source] = std::make_unique>(windowSize); - } return outputTimingsBuffers[event.source]; case PipelineEvent::Type::CUSTOM: - if(otherTimingsBuffers.find(event.source) == otherTimingsBuffers.end()) { - otherTimingsBuffers[event.source] = std::make_unique>(windowSize); - } return otherTimingsBuffers[event.source]; - case PipelineEvent::Type::INPUT_GROUP: - if(inputGroupTimingsBuffers.find(event.source) == inputGroupTimingsBuffers.end()) { - inputGroupTimingsBuffers[event.source] = std::make_unique>(windowSize); - } - return inputGroupTimingsBuffers[event.source]; - case PipelineEvent::Type::OUTPUT_GROUP: - if(outputGroupTimingsBuffers.find(event.source) == outputGroupTimingsBuffers.end()) { - outputGroupTimingsBuffers[event.source] = std::make_unique>(windowSize); - } - return outputGroupTimingsBuffers[event.source]; + case PipelineEvent::Type::INPUT_BLOCK: + return inputsGetTimingsBuffer; + case PipelineEvent::Type::OUTPUT_BLOCK: + return outputsSendTimingsBuffer; } return emptyIntBuffer; // To silence compiler warning }(); @@ -114,33 +99,22 @@ class NodeEventAggregation { case PipelineEvent::Type::LOOP: throw std::runtime_error("LOOP event should not be an interval"); case PipelineEvent::Type::INPUT: - if(inputFpsBuffers.find(event.source) == inputFpsBuffers.end()) { - inputFpsBuffers[event.source] = std::make_unique>(windowSize); - } return inputFpsBuffers[event.source]; case PipelineEvent::Type::OUTPUT: - 
if(outputFpsBuffers.find(event.source) == outputFpsBuffers.end()) { - outputFpsBuffers[event.source] = std::make_unique>(windowSize); - } return outputFpsBuffers[event.source]; case PipelineEvent::Type::CUSTOM: - if(otherFpsBuffers.find(event.source) == otherFpsBuffers.end()) { - otherFpsBuffers[event.source] = std::make_unique>(windowSize); - } return otherFpsBuffers[event.source]; - case PipelineEvent::Type::INPUT_GROUP: - if(inputGroupFpsBuffers.find(event.source) == inputGroupFpsBuffers.end()) { - inputGroupFpsBuffers[event.source] = std::make_unique>(windowSize); - } - return inputGroupFpsBuffers[event.source]; - case PipelineEvent::Type::OUTPUT_GROUP: - if(outputGroupFpsBuffers.find(event.source) == outputGroupFpsBuffers.end()) { - outputGroupFpsBuffers[event.source] = std::make_unique>(windowSize); - } - return outputGroupFpsBuffers[event.source]; + case PipelineEvent::Type::INPUT_BLOCK: + return inputsGetFpsBuffer; + case PipelineEvent::Type::OUTPUT_BLOCK: + return outputsSendFpsBuffer; } return emptyTimeBuffer; // To silence compiler warning }(); + + if(timingsBuffer == nullptr) timingsBuffer = std::make_unique>(windowSize); + if(fpsBuffer == nullptr) fpsBuffer = std::make_unique>(windowSize); + if(ongoingEvent.has_value() && ongoingEvent->sequenceNum == event.sequenceNum && event.interval == PipelineEvent::Interval::END) { // End event NodeState::DurationEvent durationEvent; @@ -185,8 +159,8 @@ class NodeEventAggregation { return ongoingOtherEvents[event.source]; case PipelineEvent::Type::INPUT: case PipelineEvent::Type::OUTPUT: - case PipelineEvent::Type::INPUT_GROUP: - case PipelineEvent::Type::OUTPUT_GROUP: + case PipelineEvent::Type::INPUT_BLOCK: + case PipelineEvent::Type::OUTPUT_BLOCK: throw std::runtime_error("INPUT and OUTPUT events should not be pings"); } return ongoingMainLoopEvent; // To silence compiler warning @@ -196,14 +170,11 @@ class NodeEventAggregation { case PipelineEvent::Type::LOOP: return mainLoopTimingsBuffer; case PipelineEvent::Type::CUSTOM: - if(otherTimingsBuffers.find(event.source) == otherTimingsBuffers.end()) { - otherTimingsBuffers[event.source] = std::make_unique>(windowSize); - } return otherTimingsBuffers[event.source]; case PipelineEvent::Type::INPUT: case PipelineEvent::Type::OUTPUT: - case PipelineEvent::Type::INPUT_GROUP: - case PipelineEvent::Type::OUTPUT_GROUP: + case PipelineEvent::Type::INPUT_BLOCK: + case PipelineEvent::Type::OUTPUT_BLOCK: throw std::runtime_error("INPUT and OUTPUT events should not be pings"); } return emptyIntBuffer; // To silence compiler warning @@ -213,18 +184,19 @@ class NodeEventAggregation { case PipelineEvent::Type::LOOP: break; case PipelineEvent::Type::CUSTOM: - if(otherFpsBuffers.find(event.source) == otherFpsBuffers.end()) { - otherFpsBuffers[event.source] = std::make_unique>(windowSize); - } return otherFpsBuffers[event.source]; case PipelineEvent::Type::INPUT: case PipelineEvent::Type::OUTPUT: - case PipelineEvent::Type::INPUT_GROUP: - case PipelineEvent::Type::OUTPUT_GROUP: + case PipelineEvent::Type::INPUT_BLOCK: + case PipelineEvent::Type::OUTPUT_BLOCK: throw std::runtime_error("INPUT and OUTPUT events should not be pings"); } return emptyTimeBuffer; // To silence compiler warning }(); + + if(timingsBuffer == nullptr) timingsBuffer = std::make_unique>(windowSize); + if(fpsBuffer == nullptr) fpsBuffer = std::make_unique>(windowSize); + if(ongoingEvent.has_value() && ongoingEvent->sequenceNum == event.sequenceNum - 1) { // End event NodeState::DurationEvent durationEvent; @@ -233,9 +205,6 @@ class 
NodeEventAggregation { eventsBuffer.add(durationEvent); state.events = eventsBuffer.getBuffer(); - if(timingsBuffer == nullptr) { - timingsBuffer = std::make_unique>(windowSize); - } timingsBuffer->add(durationEvent.durationUs); if(fpsBuffer) fpsBuffer->add({durationEvent.startEvent.getTimestamp(), durationEvent.startEvent.getSequenceNum()}); @@ -285,12 +254,12 @@ class NodeEventAggregation { stats.medianMicrosRecent = (stats.medianMicrosRecent + bufferByType[bufferByType.size() / 2 - 1]) / 2; } } - inline void updateFpsStats(NodeState::TimingStats& stats, const utility::CircularBuffer& buffer) { + inline void updateFpsStats(NodeState::Timing& timing, const utility::CircularBuffer& buffer) { if(buffer.size() < 2) return; auto timeDiff = std::chrono::duration_cast(buffer.last().time - buffer.first().time).count(); auto frameDiff = buffer.last().sequenceNum - buffer.first().sequenceNum; if(timeDiff > 0 && buffer.last().sequenceNum > buffer.first().sequenceNum) { - stats.fps = frameDiff * (1e6f / (float)timeDiff); + timing.fps = frameDiff * (1e6f / (float)timeDiff); } } @@ -307,38 +276,77 @@ class NodeEventAggregation { throw std::runtime_error(fmt::format("INPUT END event must have queue size set source: {}, node {}", event.source, event.nodeId)); } } + // Update states + switch(event.type) { + case PipelineEvent::Type::CUSTOM: + case PipelineEvent::Type::LOOP: + break; + case PipelineEvent::Type::INPUT: + state.inputStates[event.source].numQueued = *event.queueSize; + switch(event.interval) { + case PipelineEvent::Interval::START: + state.inputStates[event.source].state = NodeState::InputQueueState::State::WAITING; + break; + case PipelineEvent::Interval::END: + state.inputStates[event.source].state = NodeState::InputQueueState::State::IDLE; + break; + case PipelineEvent::Interval::NONE: + if(event.queueSize.has_value() && (event.queueSize == -1 || event.queueSize == -2)) + state.inputStates[event.source].state = NodeState::InputQueueState::State::BLOCKED; + break; + } + break; + case PipelineEvent::Type::OUTPUT: + if(event.interval == PipelineEvent::Interval::START) + state.outputStates[event.source].state = NodeState::OutputQueueState::State::SENDING; + else if(event.interval == PipelineEvent::Interval::END) + state.outputStates[event.source].state = NodeState::OutputQueueState::State::IDLE; + break; + case PipelineEvent::Type::INPUT_BLOCK: + if(event.interval == PipelineEvent::Interval::START) + state.state = NodeState::State::GETTING_INPUTS; + else if(event.interval == PipelineEvent::Interval::END) + state.state = NodeState::State::PROCESSING; + break; + case PipelineEvent::Type::OUTPUT_BLOCK: + if(event.interval == PipelineEvent::Interval::START) + state.state = NodeState::State::SENDING_OUTPUTS; + else if(event.interval == PipelineEvent::Interval::END) + state.state = NodeState::State::PROCESSING; + break; + } bool addedEvent = false; - if(event.interval == PipelineEvent::Interval::NONE) { + if(event.interval == PipelineEvent::Interval::NONE && event.type != PipelineEvent::Type::INPUT && event.type != PipelineEvent::Type::OUTPUT) { addedEvent = updatePingBuffers(event); - } else { + } else if(event.interval != PipelineEvent::Interval::NONE) { addedEvent = updateIntervalBuffers(event); } if(addedEvent /* && ++count % eventBatchSize == 0 */) { // TODO // By instance switch(event.type) { case PipelineEvent::Type::CUSTOM: - updateTimingStats(state.otherStats[event.source], *otherTimingsBuffers[event.source]); - updateFpsStats(state.otherStats[event.source], 
*otherFpsBuffers[event.source]); + updateTimingStats(state.otherTimings[event.source].durationStats, *otherTimingsBuffers[event.source]); + updateFpsStats(state.otherTimings[event.source], *otherFpsBuffers[event.source]); break; case PipelineEvent::Type::LOOP: - updateTimingStats(state.mainLoopStats, *mainLoopTimingsBuffer); - state.mainLoopStats.fps = 1e6f / (float)state.mainLoopStats.averageMicrosRecent; + updateTimingStats(state.mainLoopTiming.durationStats, *mainLoopTimingsBuffer); + state.mainLoopTiming.fps = 1e6f / (float)state.mainLoopTiming.durationStats.averageMicrosRecent; break; case PipelineEvent::Type::INPUT: - updateTimingStats(state.inputStates[event.source].timingStats, *inputTimingsBuffers[event.source]); - updateFpsStats(state.inputStates[event.source].timingStats, *inputFpsBuffers[event.source]); + updateTimingStats(state.inputStates[event.source].timing.durationStats, *inputTimingsBuffers[event.source]); + updateFpsStats(state.inputStates[event.source].timing, *inputFpsBuffers[event.source]); break; case PipelineEvent::Type::OUTPUT: - updateTimingStats(state.outputStats[event.source], *outputTimingsBuffers[event.source]); - updateFpsStats(state.outputStats[event.source], *outputFpsBuffers[event.source]); + updateTimingStats(state.outputStates[event.source].timing.durationStats, *outputTimingsBuffers[event.source]); + updateFpsStats(state.outputStates[event.source].timing, *outputFpsBuffers[event.source]); break; - case PipelineEvent::Type::INPUT_GROUP: - updateTimingStats(state.inputGroupStats[event.source], *inputGroupTimingsBuffers[event.source]); - updateFpsStats(state.inputGroupStats[event.source], *inputGroupFpsBuffers[event.source]); + case PipelineEvent::Type::INPUT_BLOCK: + updateTimingStats(state.inputsGetTiming.durationStats, *inputsGetTimingsBuffer); + updateFpsStats(state.inputsGetTiming, *inputsGetFpsBuffer); break; - case PipelineEvent::Type::OUTPUT_GROUP: - updateTimingStats(state.outputGroupStats[event.source], *outputGroupTimingsBuffers[event.source]); - updateFpsStats(state.outputGroupStats[event.source], *outputGroupFpsBuffers[event.source]); + case PipelineEvent::Type::OUTPUT_BLOCK: + updateTimingStats(state.outputsSendTiming.durationStats, *outputsSendTimingsBuffer); + updateFpsStats(state.outputsSendTiming, *outputsSendFpsBuffer); break; } } diff --git a/src/utility/PipelineEventDispatcher.cpp b/src/utility/PipelineEventDispatcher.cpp index bc19992f8..0393c26c6 100644 --- a/src/utility/PipelineEventDispatcher.cpp +++ b/src/utility/PipelineEventDispatcher.cpp @@ -23,11 +23,15 @@ std::string typeToString(PipelineEvent::Type type) { return "UNKNOWN"; } } - std::string makeKey(PipelineEvent::Type type, const std::string& source) { return typeToString(type) + "#" + source; } +bool blacklist(PipelineEvent::Type type, const std::string& source) { + if(type == PipelineEvent::Type::OUTPUT && source == "pipelineEventOutput") return true; + return false; +} + void PipelineEventDispatcher::checkNodeId() { if(nodeId == -1) { throw std::runtime_error("Node ID not set on PipelineEventDispatcher"); @@ -39,6 +43,8 @@ void PipelineEventDispatcher::setNodeId(int64_t id) { void PipelineEventDispatcher::startEvent(PipelineEvent::Type type, const std::string& source, std::optional queueSize) { // TODO add mutex checkNodeId(); + if(blacklist(type, source)) return; + auto& event = events[makeKey(type, source)]; if(event.ongoing) { throw std::runtime_error("Event with name " + source + " is already ongoing"); @@ -53,7 +59,7 @@ void 
PipelineEventDispatcher::startEvent(PipelineEvent::Type type, const std::st event.ongoing = true; if(out) { - out->send(std::make_shared(event)); + out->send(std::make_shared(event.event)); } } void PipelineEventDispatcher::startInputEvent(const std::string& source, std::optional queueSize) { @@ -68,6 +74,8 @@ void PipelineEventDispatcher::startCustomEvent(const std::string& source) { void PipelineEventDispatcher::endEvent(PipelineEvent::Type type, const std::string& source, std::optional queueSize) { // TODO add mutex checkNodeId(); + if(blacklist(type, source)) return; + auto now = std::chrono::steady_clock::now(); auto& event = events[makeKey(type, source)]; @@ -101,6 +109,8 @@ void PipelineEventDispatcher::endCustomEvent(const std::string& source) { void PipelineEventDispatcher::pingEvent(PipelineEvent::Type type, const std::string& source) { // TODO add mutex checkNodeId(); + if(blacklist(type, source)) return; + auto now = std::chrono::steady_clock::now(); auto& event = events[makeKey(type, source)]; @@ -124,6 +134,26 @@ void PipelineEventDispatcher::pingMainLoopEvent() { void PipelineEventDispatcher::pingCustomEvent(const std::string& source) { pingEvent(PipelineEvent::Type::CUSTOM, source); } +void PipelineEventDispatcher::pingInputEvent(const std::string& source, std::optional queueSize) { + // TODO add mutex + checkNodeId(); + if(blacklist(PipelineEvent::Type::INPUT, source)) return; + + auto now = std::chrono::steady_clock::now(); + + auto& event = events[makeKey(PipelineEvent::Type::INPUT, source)]; + PipelineEvent eventCopy = event.event; + eventCopy.setTimestamp(now); + eventCopy.tsDevice = eventCopy.ts; + eventCopy.nodeId = nodeId; + eventCopy.queueSize = std::move(queueSize); + eventCopy.interval = PipelineEvent::Interval::NONE; + // type and source are already set + + if(out) { + out->send(std::make_shared(eventCopy)); + } +} PipelineEventDispatcher::BlockPipelineEvent PipelineEventDispatcher::blockEvent(PipelineEvent::Type type, const std::string& source) { return BlockPipelineEvent(*this, type, source); } From 48c1b6a92f807932dd93b78fcd311f3529cf8585 Mon Sep 17 00:00:00 2001 From: asahtik Date: Wed, 8 Oct 2025 16:39:13 +0200 Subject: [PATCH 24/40] Add ability to get the entire pipeline schema, add bridge info to pipeline schema --- include/depthai/pipeline/NodeObjInfo.hpp | 3 +- include/depthai/pipeline/Pipeline.hpp | 11 +- include/depthai/pipeline/PipelineSchema.hpp | 3 +- src/pipeline/Pipeline.cpp | 197 ++++++++++-------- .../internal/PipelineEventAggregation.cpp | 2 +- 5 files changed, 128 insertions(+), 88 deletions(-) diff --git a/include/depthai/pipeline/NodeObjInfo.hpp b/include/depthai/pipeline/NodeObjInfo.hpp index da1c87a0d..c786065dd 100644 --- a/include/depthai/pipeline/NodeObjInfo.hpp +++ b/include/depthai/pipeline/NodeObjInfo.hpp @@ -15,6 +15,7 @@ struct NodeObjInfo { std::string name; std::string alias; + std::string device; std::vector properties; @@ -27,6 +28,6 @@ struct NodeObjInfo { std::unordered_map, NodeIoInfo, IoInfoKey> ioInfo; }; -DEPTHAI_SERIALIZE_EXT(NodeObjInfo, id, parentId, name, alias, properties, logLevel, ioInfo); +DEPTHAI_SERIALIZE_EXT(NodeObjInfo, id, parentId, name, alias, device, properties, logLevel, ioInfo); } // namespace dai diff --git a/include/depthai/pipeline/Pipeline.hpp b/include/depthai/pipeline/Pipeline.hpp index 30d1a209a..480729b7c 100644 --- a/include/depthai/pipeline/Pipeline.hpp +++ b/include/depthai/pipeline/Pipeline.hpp @@ -251,7 +251,9 @@ class PipelineStateApi { std::vector nodeIds; // empty means all nodes public: 
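    // Collects the ids of every node in the pipeline so that state queries default to all nodes when no filter is given.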
- PipelineStateApi(std::shared_ptr pipelineStateOut, std::shared_ptr pipelineStateRequest, const std::vector>& allNodes) + PipelineStateApi(std::shared_ptr pipelineStateOut, + std::shared_ptr pipelineStateRequest, + const std::vector>& allNodes) : pipelineStateOut(std::move(pipelineStateOut)), pipelineStateRequest(std::move(pipelineStateRequest)) { for(const auto& n : allNodes) { nodeIds.push_back(n->id); @@ -294,6 +296,7 @@ class PipelineImpl : public std::enable_shared_from_this { // Functions Node::Id getNextUniqueId(); PipelineSchema getPipelineSchema(SerializationType type = DEFAULT_SERIALIZATION_TYPE) const; + PipelineSchema getDevicePipelineSchema(SerializationType type = DEFAULT_SERIALIZATION_TYPE) const; Device::Config getDeviceConfig() const; void setCameraTuningBlobPath(const fs::path& path); void setXLinkChunkSize(int sizeBytes); @@ -341,6 +344,7 @@ class PipelineImpl : public std::enable_shared_from_this { // using NodeMap = std::unordered_map>; // NodeMap nodeMap; std::vector> nodes; + std::vector> xlinkBridges; // TODO(themarpe) - refactor, connections are now carried by nodes instead using NodeConnectionMap = std::unordered_map>; @@ -539,6 +543,11 @@ class Pipeline { */ PipelineSchema getPipelineSchema(SerializationType type = DEFAULT_SERIALIZATION_TYPE) const; + /** + * @returns Device pipeline schema (without host only nodes and connections) + */ + PipelineSchema getDevicePipelineSchema(SerializationType type = DEFAULT_SERIALIZATION_TYPE) const; + // void loadAssets(AssetManager& assetManager); void serialize(PipelineSchema& schema, Assets& assets, std::vector& assetStorage) const { impl()->serialize(schema, assets, assetStorage); diff --git a/include/depthai/pipeline/PipelineSchema.hpp b/include/depthai/pipeline/PipelineSchema.hpp index b8cd497d0..a9fee1ada 100644 --- a/include/depthai/pipeline/PipelineSchema.hpp +++ b/include/depthai/pipeline/PipelineSchema.hpp @@ -14,8 +14,9 @@ struct PipelineSchema { std::vector connections; GlobalProperties globalProperties; std::unordered_map nodes; + std::vector> bridges; }; -DEPTHAI_SERIALIZE_EXT(PipelineSchema, connections, globalProperties, nodes); +DEPTHAI_SERIALIZE_EXT(PipelineSchema, connections, globalProperties, nodes, bridges); } // namespace dai diff --git a/src/pipeline/Pipeline.cpp b/src/pipeline/Pipeline.cpp index 737cea306..9ed64bdee 100644 --- a/src/pipeline/Pipeline.cpp +++ b/src/pipeline/Pipeline.cpp @@ -74,6 +74,10 @@ PipelineSchema Pipeline::getPipelineSchema(SerializationType type) const { return pimpl->getPipelineSchema(type); } +PipelineSchema Pipeline::getDevicePipelineSchema(SerializationType type) const { + return pimpl->getDevicePipelineSchema(type); +} + GlobalProperties PipelineImpl::getGlobalProperties() const { return globalProperties; } @@ -117,7 +121,7 @@ std::vector> PipelineImpl::getSourceNodes() { void PipelineImpl::serialize(PipelineSchema& schema, Assets& assets, std::vector& assetStorage, SerializationType type) const { // Set schema - schema = getPipelineSchema(type); + schema = getDevicePipelineSchema(type); // Serialize all asset managers into asset storage assetStorage.clear(); @@ -188,6 +192,7 @@ std::vector PipelineImpl::getConnections() const { PipelineSchema PipelineImpl::getPipelineSchema(SerializationType type) const { PipelineSchema schema; schema.globalProperties = globalProperties; + schema.bridges = xlinkBridges; int latestIoId = 0; // Loop over all nodes, and add them to schema for(const auto& node : getAllNodes()) { @@ -195,94 +200,91 @@ PipelineSchema 
PipelineImpl::getPipelineSchema(SerializationType type) const { if(std::string(node->getName()) == std::string("NodeGroup") || std::string(node->getName()) == std::string("DeviceNodeGroup")) { continue; } - // Check if its a host node or device node - if(node->runOnHost()) { - // host node, no need to serialize to a schema - // TBD any additional changes - } else { - // Create 'node' info - NodeObjInfo info; - info.id = node->id; - info.name = node->getName(); - info.alias = node->getAlias(); - info.parentId = node->parentId; - const auto& deviceNode = std::dynamic_pointer_cast(node); - if(!deviceNode) { - throw std::invalid_argument(fmt::format("Node '{}' should subclass DeviceNode or have hostNode == true", info.name)); - } + // Create 'node' info + NodeObjInfo info; + info.id = node->id; + info.name = node->getName(); + info.alias = node->getAlias(); + info.parentId = node->parentId; + info.device = node->runOnHost() ? "host" : "device"; + const auto& deviceNode = std::dynamic_pointer_cast(node); + if(!node->runOnHost() && !deviceNode) { + throw std::invalid_argument(fmt::format("Node '{}' should subclass DeviceNode or have hostNode == true", info.name)); + } + if(deviceNode) { deviceNode->getProperties().serialize(info.properties, type); info.logLevel = deviceNode->getLogLevel(); - // Create Io information - auto inputs = node->getInputs(); - auto outputs = node->getOutputs(); - - info.ioInfo.reserve(inputs.size() + outputs.size()); - - // Add inputs - for(const auto& input : inputs) { - NodeIoInfo io; - io.id = latestIoId; - latestIoId++; - io.blocking = input.getBlocking(); - io.queueSize = input.getMaxSize(); - io.name = input.getName(); - io.group = input.getGroup(); - auto ioKey = std::make_tuple(io.group, io.name); - - io.waitForMessage = input.getWaitForMessage(); - switch(input.getType()) { - case Node::Input::Type::MReceiver: - io.type = NodeIoInfo::Type::MReceiver; - break; - case Node::Input::Type::SReceiver: - io.type = NodeIoInfo::Type::SReceiver; - break; - } + } + // Create Io information + auto inputs = node->getInputs(); + auto outputs = node->getOutputs(); + + info.ioInfo.reserve(inputs.size() + outputs.size()); + + // Add inputs + for(const auto& input : inputs) { + NodeIoInfo io; + io.id = latestIoId; + latestIoId++; + io.blocking = input.getBlocking(); + io.queueSize = input.getMaxSize(); + io.name = input.getName(); + io.group = input.getGroup(); + auto ioKey = std::make_tuple(io.group, io.name); + + io.waitForMessage = input.getWaitForMessage(); + switch(input.getType()) { + case Node::Input::Type::MReceiver: + io.type = NodeIoInfo::Type::MReceiver; + break; + case Node::Input::Type::SReceiver: + io.type = NodeIoInfo::Type::SReceiver; + break; + } - if(info.ioInfo.count(ioKey) > 0) { - if(io.group == "") { - throw std::invalid_argument(fmt::format("'{}.{}' redefined. Inputs and outputs must have unique names", info.name, io.name)); - } else { - throw std::invalid_argument( - fmt::format("'{}.{}[\"{}\"]' redefined. Inputs and outputs must have unique names", info.name, io.group, io.name)); - } + if(info.ioInfo.count(ioKey) > 0) { + if(io.group == "") { + throw std::invalid_argument(fmt::format("'{}.{}' redefined. Inputs and outputs must have unique names", info.name, io.name)); + } else { + throw std::invalid_argument( + fmt::format("'{}.{}[\"{}\"]' redefined. 
Inputs and outputs must have unique names", info.name, io.group, io.name)); } - info.ioInfo[ioKey] = io; } + info.ioInfo[ioKey] = io; + } - // Add outputs - for(const auto& output : outputs) { - NodeIoInfo io; - io.id = latestIoId; - latestIoId++; - io.blocking = false; - io.name = output.getName(); - io.group = output.getGroup(); - auto ioKey = std::make_tuple(io.group, io.name); - - switch(output.getType()) { - case Node::Output::Type::MSender: - io.type = NodeIoInfo::Type::MSender; - break; - case Node::Output::Type::SSender: - io.type = NodeIoInfo::Type::SSender; - break; - } + // Add outputs + for(const auto& output : outputs) { + NodeIoInfo io; + io.id = latestIoId; + latestIoId++; + io.blocking = false; + io.name = output.getName(); + io.group = output.getGroup(); + auto ioKey = std::make_tuple(io.group, io.name); + + switch(output.getType()) { + case Node::Output::Type::MSender: + io.type = NodeIoInfo::Type::MSender; + break; + case Node::Output::Type::SSender: + io.type = NodeIoInfo::Type::SSender; + break; + } - if(info.ioInfo.count(ioKey) > 0) { - if(io.group == "") { - throw std::invalid_argument(fmt::format("'{}.{}' redefined. Inputs and outputs must have unique names", info.name, io.name)); - } else { - throw std::invalid_argument( - fmt::format("'{}.{}[\"{}\"]' redefined. Inputs and outputs must have unique names", info.name, io.group, io.name)); - } + if(info.ioInfo.count(ioKey) > 0) { + if(io.group == "") { + throw std::invalid_argument(fmt::format("'{}.{}' redefined. Inputs and outputs must have unique names", info.name, io.name)); + } else { + throw std::invalid_argument( + fmt::format("'{}.{}[\"{}\"]' redefined. Inputs and outputs must have unique names", info.name, io.group, io.name)); } - info.ioInfo[ioKey] = io; } - - // At the end, add the constructed node information to the schema - schema.nodes[info.id] = info; + info.ioInfo[ioKey] = io; } + + // At the end, add the constructed node information to the schema + schema.nodes[info.id] = info; } // Create 'connections' info @@ -314,11 +316,6 @@ PipelineSchema PipelineImpl::getPipelineSchema(SerializationType type) const { bool outputHost = outNode->runOnHost(); bool inputHost = inNode->runOnHost(); - if(outputHost && inputHost) { - // skip - connection between host nodes doesn't have to be represented to the device - continue; - } - if(outputHost && !inputHost) { throw std::invalid_argument( fmt::format("Connection from host node '{}' to device node '{}' is not allowed during serialization.", outNode->getName(), inNode->getName())); @@ -336,6 +333,35 @@ PipelineSchema PipelineImpl::getPipelineSchema(SerializationType type) const { return schema; } +PipelineSchema PipelineImpl::getDevicePipelineSchema(SerializationType type) const { + auto schema = getPipelineSchema(type); + // Remove bridge info + schema.bridges.clear(); + // Remove host nodes + for(auto it = schema.nodes.begin(); it != schema.nodes.end();) { + if(it->second.device != "device") { + it = schema.nodes.erase(it); + } else { + ++it; + } + } + // Remove connections between host nodes (host - device connections should not exist) + schema.connections.erase(std::remove_if(schema.connections.begin(), + schema.connections.end(), + [&schema](const NodeConnectionSchema& c) { + auto node1 = schema.nodes.find(c.node1Id); + auto node2 = schema.nodes.find(c.node2Id); + if(node1 == schema.nodes.end() && node2 == schema.nodes.end()) { + return true; + } else if(node1 == schema.nodes.end() || node2 == schema.nodes.end()) { + throw std::invalid_argument("Connection from 
host node to device node should not exist here"); + } + return false; + }), + schema.connections.end()); + return schema; +} + Device::Config PipelineImpl::getDeviceConfig() const { Device::Config config; config.board = board; @@ -759,6 +785,9 @@ void PipelineImpl::build() { xLinkBridge.xLinkInHost->setStreamName(streamName); xLinkBridge.xLinkInHost->setConnection(defaultDevice->getConnection()); connection.out->link(xLinkBridge.xLinkOut->input); + + // Note the created bridge for serialization (for visualization) + xlinkBridges.push_back({outNode->id, inNode->id}); } auto xLinkBridge = bridgesOut[connection.out]; connection.out->unlink(*connection.in); // Unlink the connection diff --git a/src/pipeline/node/internal/PipelineEventAggregation.cpp b/src/pipeline/node/internal/PipelineEventAggregation.cpp index 2372e572f..439694004 100644 --- a/src/pipeline/node/internal/PipelineEventAggregation.cpp +++ b/src/pipeline/node/internal/PipelineEventAggregation.cpp @@ -131,7 +131,7 @@ class NodeEventAggregation { return true; } else { if(ongoingEvent.has_value()) { - // TODO: add ability to wait for multiple events (nn hailo threaded processing time) + // TODO: add ability to wait for multiple events (nn hailo threaded processing time - events with custom ids for tracking) logger->warn("Ongoing event (seq {}) not finished before new one (seq {}) started. Event source: {}, node {}", ongoingEvent->sequenceNum, event.sequenceNum, From c8ec85a41417d776a3c3c57ef03d1e3a299da514 Mon Sep 17 00:00:00 2001 From: asahtik Date: Wed, 8 Oct 2025 16:52:56 +0200 Subject: [PATCH 25/40] Add comments to pipeline state --- .../pipeline/datatype/PipelineState.hpp | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/include/depthai/pipeline/datatype/PipelineState.hpp b/include/depthai/pipeline/datatype/PipelineState.hpp index ef0d66851..658db2215 100644 --- a/include/depthai/pipeline/datatype/PipelineState.hpp +++ b/include/depthai/pipeline/datatype/PipelineState.hpp @@ -39,21 +39,28 @@ class NodeState { DEPTHAI_SERIALIZE(QueueStats, maxQueued, minQueuedRecent, maxQueuedRecent, medianQueuedRecent); }; struct InputQueueState { + // Current state of the input queue. enum class State : std::int32_t { IDLE = 0, - WAITING = 1, - BLOCKED = 2 + WAITING = 1, // Waiting to receive a message + BLOCKED = 2 // An output attempted to send to this input, but the input queue was full } state = State::IDLE; + // Number of messages currently queued in the input queue uint32_t numQueued; + // Timing info about this input Timing timing; + // Queue usage stats QueueStats queueStats; DEPTHAI_SERIALIZE(InputQueueState, state, numQueued, timing); }; struct OutputQueueState { + // Current state of the output queue. Send should ideally be instant. This is not the case when the input queue is full. + // In that case, the state will be SENDING until there is space in the input queue (unless trySend is used). 
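+        // An output that remains SENDING across consecutive reports therefore usually indicates a full, blocking input queue downstream.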
enum class State : std::int32_t { IDLE = 0, SENDING = 1 } state = State::IDLE; + // Timing info about this output Timing timing; DEPTHAI_SERIALIZE(OutputQueueState, state, timing); }; @@ -64,13 +71,21 @@ class NodeState { SENDING_OUTPUTS = 3 }; + // Current state of the node - idle only when not running State state = State::IDLE; + // Optional list of recent events std::vector events; + // Info about each output std::unordered_map outputStates; + // Info about each input std::unordered_map inputStates; + // Time spent getting inputs in a loop Timing inputsGetTiming; + // Time spent sending outputs in a loop Timing outputsSendTiming; + // Main node loop timing (processing time + inputs get + outputs send) Timing mainLoopTiming; + // Other timings that the developer of the node decided to add std::unordered_map otherTimings; DEPTHAI_SERIALIZE(NodeState, events, outputStates, inputStates, inputsGetTiming, outputsSendTiming, mainLoopTiming, otherTimings); From 1a2b848d210590b260909563c87709534acef357 Mon Sep 17 00:00:00 2001 From: asahtik Date: Thu, 9 Oct 2025 09:26:08 +0200 Subject: [PATCH 26/40] Bugfix --- .../node/internal/PipelineEventAggregation.cpp | 10 ++++------ src/utility/PipelineEventDispatcher.cpp | 15 ++++++++------- 2 files changed, 12 insertions(+), 13 deletions(-) diff --git a/src/pipeline/node/internal/PipelineEventAggregation.cpp b/src/pipeline/node/internal/PipelineEventAggregation.cpp index 439694004..e88f082cd 100644 --- a/src/pipeline/node/internal/PipelineEventAggregation.cpp +++ b/src/pipeline/node/internal/PipelineEventAggregation.cpp @@ -129,7 +129,7 @@ class NodeEventAggregation { ongoingEvent = std::nullopt; return true; - } else { + } else if(event.interval == PipelineEvent::Interval::START) { if(ongoingEvent.has_value()) { // TODO: add ability to wait for multiple events (nn hailo threaded processing time - events with custom ids for tracking) logger->warn("Ongoing event (seq {}) not finished before new one (seq {}) started. 
Event source: {}, node {}", @@ -138,12 +138,10 @@ class NodeEventAggregation { ongoingEvent->source, event.nodeId); } - if(event.interval == PipelineEvent::Interval::START) { - // Start event - ongoingEvent = event; - } - return false; + // Start event + ongoingEvent = event; } + return false; } inline bool updatePingBuffers(PipelineEvent& event) { diff --git a/src/utility/PipelineEventDispatcher.cpp b/src/utility/PipelineEventDispatcher.cpp index 0393c26c6..502fd15ff 100644 --- a/src/utility/PipelineEventDispatcher.cpp +++ b/src/utility/PipelineEventDispatcher.cpp @@ -46,16 +46,14 @@ void PipelineEventDispatcher::startEvent(PipelineEvent::Type type, const std::st if(blacklist(type, source)) return; auto& event = events[makeKey(type, source)]; - if(event.ongoing) { - throw std::runtime_error("Event with name " + source + " is already ongoing"); - } event.event.setTimestamp(std::chrono::steady_clock::now()); event.event.tsDevice = event.event.ts; ++event.event.sequenceNum; event.event.nodeId = nodeId; event.event.queueSize = std::move(queueSize); event.event.interval = PipelineEvent::Interval::START; - // type and source are already set + event.event.type = type; + event.event.source = source; event.ongoing = true; if(out) { @@ -88,7 +86,8 @@ void PipelineEventDispatcher::endEvent(PipelineEvent::Type type, const std::stri event.event.nodeId = nodeId; event.event.queueSize = std::move(queueSize); event.event.interval = PipelineEvent::Interval::END; - // type and source are already set + event.event.type = type; + event.event.source = source; event.ongoing = false; if(out) { @@ -122,7 +121,8 @@ void PipelineEventDispatcher::pingEvent(PipelineEvent::Type type, const std::str ++event.event.sequenceNum; event.event.nodeId = nodeId; event.event.interval = PipelineEvent::Interval::NONE; - // type and source are already set + event.event.type = type; + event.event.source = source; if(out) { out->send(std::make_shared(event.event)); @@ -148,7 +148,8 @@ void PipelineEventDispatcher::pingInputEvent(const std::string& source, std::opt eventCopy.nodeId = nodeId; eventCopy.queueSize = std::move(queueSize); eventCopy.interval = PipelineEvent::Interval::NONE; - // type and source are already set + eventCopy.type = PipelineEvent::Type::INPUT; + eventCopy.source = source; if(out) { out->send(std::make_shared(eventCopy)); From 5a6d7652188cae7f6c3ab32d4db21d48466c70e5 Mon Sep 17 00:00:00 2001 From: asahtik Date: Thu, 9 Oct 2025 10:54:37 +0200 Subject: [PATCH 27/40] Update input queue size more often --- .../datatype/PipelineEventBindings.cpp | 1 + .../pipeline/datatype/PipelineEvent.hpp | 3 ++- include/depthai/utility/LockingQueue.hpp | 20 +++++++++---------- .../utility/PipelineEventDispatcher.hpp | 2 +- .../PipelineEventDispatcherInterface.hpp | 2 +- src/pipeline/MessageQueue.cpp | 12 +++++------ .../internal/PipelineEventAggregation.cpp | 5 ++--- src/utility/PipelineEventDispatcher.cpp | 3 ++- 8 files changed, 25 insertions(+), 23 deletions(-) diff --git a/bindings/python/src/pipeline/datatype/PipelineEventBindings.cpp b/bindings/python/src/pipeline/datatype/PipelineEventBindings.cpp index 5920810db..f3437db59 100644 --- a/bindings/python/src/pipeline/datatype/PipelineEventBindings.cpp +++ b/bindings/python/src/pipeline/datatype/PipelineEventBindings.cpp @@ -45,6 +45,7 @@ void bind_pipelineevent(pybind11::module& m, void* pCallstack) { pipelineEvent.def(py::init<>()) .def("__repr__", &PipelineEvent::str) .def_readwrite("nodeId", &PipelineEvent::nodeId, DOC(dai, PipelineEvent, nodeId)) + 
.def_readwrite("status", &PipelineEvent::status, DOC(dai, PipelineEvent, status)) .def_readwrite("queueSize", &PipelineEvent::queueSize, DOC(dai, PipelineEvent, queueSize)) .def_readwrite("interval", &PipelineEvent::interval, DOC(dai, PipelineEvent, interval)) .def_readwrite("type", &PipelineEvent::type, DOC(dai, PipelineEvent, type)) diff --git a/include/depthai/pipeline/datatype/PipelineEvent.hpp b/include/depthai/pipeline/datatype/PipelineEvent.hpp index b0c17f215..1ececed60 100644 --- a/include/depthai/pipeline/datatype/PipelineEvent.hpp +++ b/include/depthai/pipeline/datatype/PipelineEvent.hpp @@ -30,6 +30,7 @@ class PipelineEvent : public Buffer { virtual ~PipelineEvent() = default; int64_t nodeId = -1; + int32_t status = 0; std::optional queueSize; Interval interval = Interval::NONE; Type type = Type::CUSTOM; @@ -40,7 +41,7 @@ class PipelineEvent : public Buffer { datatype = DatatypeEnum::PipelineEvent; }; - DEPTHAI_SERIALIZE(PipelineEvent, Buffer::ts, Buffer::tsDevice, Buffer::sequenceNum, nodeId, interval, type, source); + DEPTHAI_SERIALIZE(PipelineEvent, Buffer::ts, Buffer::tsDevice, Buffer::sequenceNum, nodeId, status, interval, type, source); }; } // namespace dai diff --git a/include/depthai/utility/LockingQueue.hpp b/include/depthai/utility/LockingQueue.hpp index 54b076153..40db11abf 100644 --- a/include/depthai/utility/LockingQueue.hpp +++ b/include/depthai/utility/LockingQueue.hpp @@ -151,7 +151,7 @@ class LockingQueue { return true; } - bool push(T const& data, std::function callback = [](LockingQueueState) {}) { + bool push(T const& data, std::function callback = [](LockingQueueState, size_t) {}) { { std::unique_lock lock(guard); if(maxSize == 0) { @@ -169,7 +169,7 @@ class LockingQueue { } } else { if(queue.size() >= maxSize) { - callback(LockingQueueState::BLOCKED); + callback(LockingQueueState::BLOCKED, queue.size()); } signalPop.wait(lock, [this]() { return queue.size() < maxSize || destructed; }); if(destructed) return false; @@ -181,7 +181,7 @@ class LockingQueue { return true; } - bool push(T&& data, std::function callback = [](LockingQueueState) {}) { + bool push(T&& data, std::function callback = [](LockingQueueState, size_t) {}) { { std::unique_lock lock(guard); if(maxSize == 0) { @@ -199,7 +199,7 @@ class LockingQueue { } } else { if(queue.size() >= maxSize) { - callback(LockingQueueState::BLOCKED); + callback(LockingQueueState::BLOCKED, queue.size()); } signalPop.wait(lock, [this]() { return queue.size() < maxSize || destructed; }); if(destructed) return false; @@ -212,7 +212,7 @@ class LockingQueue { } template - bool tryWaitAndPush(T const& data, std::chrono::duration timeout, std::function callback = [](LockingQueueState) {}) { + bool tryWaitAndPush(T const& data, std::chrono::duration timeout, std::function callback = [](LockingQueueState, size_t) {}) { { std::unique_lock lock(guard); if(maxSize == 0) { @@ -230,12 +230,12 @@ class LockingQueue { } } else { if(queue.size() >= maxSize) { - callback(LockingQueueState::BLOCKED); + callback(LockingQueueState::BLOCKED, queue.size()); } // First checks predicate, then waits bool pred = signalPop.wait_for(lock, timeout, [this]() { return queue.size() < maxSize || destructed; }); if(!pred) { - callback(LockingQueueState::CANCELLED); + callback(LockingQueueState::CANCELLED, queue.size()); } if(!pred) return false; if(destructed) return false; @@ -248,7 +248,7 @@ class LockingQueue { } template - bool tryWaitAndPush(T&& data, std::chrono::duration timeout, std::function callback = [](LockingQueueState) {}) { + bool 
tryWaitAndPush(T&& data, std::chrono::duration timeout, std::function callback = [](LockingQueueState, size_t) {}) { { std::unique_lock lock(guard); if(maxSize == 0) { @@ -267,11 +267,11 @@ class LockingQueue { } else { // First checks predicate, then waits if(queue.size() >= maxSize) { - callback(LockingQueueState::BLOCKED); + callback(LockingQueueState::BLOCKED, queue.size()); } bool pred = signalPop.wait_for(lock, timeout, [this]() { return queue.size() < maxSize || destructed; }); if(!pred) { - callback(LockingQueueState::CANCELLED); + callback(LockingQueueState::CANCELLED, queue.size()); } if(!pred) return false; if(destructed) return false; diff --git a/include/depthai/utility/PipelineEventDispatcher.hpp b/include/depthai/utility/PipelineEventDispatcher.hpp index 1ac2fe8a9..3590b0967 100644 --- a/include/depthai/utility/PipelineEventDispatcher.hpp +++ b/include/depthai/utility/PipelineEventDispatcher.hpp @@ -45,7 +45,7 @@ class PipelineEventDispatcher : public PipelineEventDispatcherInterface { void pingEvent(PipelineEvent::Type type, const std::string& source) override; void pingMainLoopEvent() override; void pingCustomEvent(const std::string& source) override; - void pingInputEvent(const std::string& source, std::optional queueSize = std::nullopt) override; + void pingInputEvent(const std::string& source, int32_t status, std::optional queueSize = std::nullopt) override; BlockPipelineEvent blockEvent(PipelineEvent::Type type, const std::string& source) override; BlockPipelineEvent inputBlockEvent(const std::string& source = "defaultInputGroup") override; BlockPipelineEvent outputBlockEvent(const std::string& source = "defaultOutputGroup") override; diff --git a/include/depthai/utility/PipelineEventDispatcherInterface.hpp b/include/depthai/utility/PipelineEventDispatcherInterface.hpp index bf2f1d970..9fe66b16f 100644 --- a/include/depthai/utility/PipelineEventDispatcherInterface.hpp +++ b/include/depthai/utility/PipelineEventDispatcherInterface.hpp @@ -37,7 +37,7 @@ class PipelineEventDispatcherInterface { virtual void pingEvent(PipelineEvent::Type type, const std::string& source) = 0; virtual void pingMainLoopEvent() = 0; virtual void pingCustomEvent(const std::string& source) = 0; - virtual void pingInputEvent(const std::string& source, std::optional queueSize = std::nullopt) = 0; + virtual void pingInputEvent(const std::string& source, int32_t status, std::optional queueSize = std::nullopt) = 0; virtual BlockPipelineEvent blockEvent(PipelineEvent::Type type, const std::string& source) = 0; virtual BlockPipelineEvent inputBlockEvent(const std::string& source = "defaultInputGroup") = 0; virtual BlockPipelineEvent outputBlockEvent(const std::string& source = "defaultOutputGroup") = 0; diff --git a/src/pipeline/MessageQueue.cpp b/src/pipeline/MessageQueue.cpp index 409f4da61..4e906bb71 100644 --- a/src/pipeline/MessageQueue.cpp +++ b/src/pipeline/MessageQueue.cpp @@ -117,14 +117,14 @@ void MessageQueue::send(const std::shared_ptr& msg) { throw QueueException(CLOSED_QUEUE_MESSAGE); } callCallbacks(msg); - auto queueNotClosed = queue.push(msg, [&](LockingQueueState state) { + auto queueNotClosed = queue.push(msg, [&](LockingQueueState state, size_t size) { if(pipelineEventDispatcher) { switch(state) { case LockingQueueState::BLOCKED: - pipelineEventDispatcher->pingInputEvent(name, -1); + pipelineEventDispatcher->pingInputEvent(name, -1, size); break; case LockingQueueState::CANCELLED: - pipelineEventDispatcher->pingInputEvent(name, -2); + pipelineEventDispatcher->pingInputEvent(name, 
-2, size); break; case LockingQueueState::OK: break; @@ -140,14 +140,14 @@ bool MessageQueue::send(const std::shared_ptr& msg, std::chrono::mill if(queue.isDestroyed()) { throw QueueException(CLOSED_QUEUE_MESSAGE); } - return queue.tryWaitAndPush(msg, timeout, [&](LockingQueueState state) { + return queue.tryWaitAndPush(msg, timeout, [&](LockingQueueState state, size_t size) { if(pipelineEventDispatcher) { switch(state) { case LockingQueueState::BLOCKED: - pipelineEventDispatcher->pingInputEvent(name, -1); + pipelineEventDispatcher->pingInputEvent(name, -1, size); break; case LockingQueueState::CANCELLED: - pipelineEventDispatcher->pingInputEvent(name, -2); + pipelineEventDispatcher->pingInputEvent(name, -2, size); break; case LockingQueueState::OK: break; diff --git a/src/pipeline/node/internal/PipelineEventAggregation.cpp b/src/pipeline/node/internal/PipelineEventAggregation.cpp index e88f082cd..4b22f3a30 100644 --- a/src/pipeline/node/internal/PipelineEventAggregation.cpp +++ b/src/pipeline/node/internal/PipelineEventAggregation.cpp @@ -280,7 +280,7 @@ class NodeEventAggregation { case PipelineEvent::Type::LOOP: break; case PipelineEvent::Type::INPUT: - state.inputStates[event.source].numQueued = *event.queueSize; + if(event.queueSize.has_value()) state.inputStates[event.source].numQueued = *event.queueSize; switch(event.interval) { case PipelineEvent::Interval::START: state.inputStates[event.source].state = NodeState::InputQueueState::State::WAITING; @@ -289,8 +289,7 @@ class NodeEventAggregation { state.inputStates[event.source].state = NodeState::InputQueueState::State::IDLE; break; case PipelineEvent::Interval::NONE: - if(event.queueSize.has_value() && (event.queueSize == -1 || event.queueSize == -2)) - state.inputStates[event.source].state = NodeState::InputQueueState::State::BLOCKED; + if(event.status == -1 || event.status == -2) state.inputStates[event.source].state = NodeState::InputQueueState::State::BLOCKED; break; } break; diff --git a/src/utility/PipelineEventDispatcher.cpp b/src/utility/PipelineEventDispatcher.cpp index 502fd15ff..c76c90f48 100644 --- a/src/utility/PipelineEventDispatcher.cpp +++ b/src/utility/PipelineEventDispatcher.cpp @@ -134,7 +134,7 @@ void PipelineEventDispatcher::pingMainLoopEvent() { void PipelineEventDispatcher::pingCustomEvent(const std::string& source) { pingEvent(PipelineEvent::Type::CUSTOM, source); } -void PipelineEventDispatcher::pingInputEvent(const std::string& source, std::optional queueSize) { +void PipelineEventDispatcher::pingInputEvent(const std::string& source, int32_t status, std::optional queueSize) { // TODO add mutex checkNodeId(); if(blacklist(PipelineEvent::Type::INPUT, source)) return; @@ -146,6 +146,7 @@ void PipelineEventDispatcher::pingInputEvent(const std::string& source, std::opt eventCopy.setTimestamp(now); eventCopy.tsDevice = eventCopy.ts; eventCopy.nodeId = nodeId; + eventCopy.status = std::move(status); eventCopy.queueSize = std::move(queueSize); eventCopy.interval = PipelineEvent::Interval::NONE; eventCopy.type = PipelineEvent::Type::INPUT; From af86cae59186e8090a0c3cb04b282b87216a027d Mon Sep 17 00:00:00 2001 From: asahtik Date: Thu, 9 Oct 2025 10:59:40 +0200 Subject: [PATCH 28/40] Add ability to disable sending pipeline events --- include/depthai/utility/PipelineEventDispatcherInterface.hpp | 2 ++ src/utility/PipelineEventDispatcher.cpp | 4 ++++ 2 files changed, 6 insertions(+) diff --git a/include/depthai/utility/PipelineEventDispatcherInterface.hpp b/include/depthai/utility/PipelineEventDispatcherInterface.hpp 
index 9fe66b16f..e8f35751f 100644 --- a/include/depthai/utility/PipelineEventDispatcherInterface.hpp +++ b/include/depthai/utility/PipelineEventDispatcherInterface.hpp @@ -24,6 +24,8 @@ class PipelineEventDispatcherInterface { } }; + bool sendEvents = true; + virtual ~PipelineEventDispatcherInterface() = default; virtual void setNodeId(int64_t id) = 0; virtual void startEvent(PipelineEvent::Type type, const std::string& source, std::optional queueSize = std::nullopt) = 0; diff --git a/src/utility/PipelineEventDispatcher.cpp b/src/utility/PipelineEventDispatcher.cpp index c76c90f48..a67504518 100644 --- a/src/utility/PipelineEventDispatcher.cpp +++ b/src/utility/PipelineEventDispatcher.cpp @@ -42,6 +42,7 @@ void PipelineEventDispatcher::setNodeId(int64_t id) { } void PipelineEventDispatcher::startEvent(PipelineEvent::Type type, const std::string& source, std::optional queueSize) { // TODO add mutex + if(!sendEvents) return; checkNodeId(); if(blacklist(type, source)) return; @@ -71,6 +72,7 @@ void PipelineEventDispatcher::startCustomEvent(const std::string& source) { } void PipelineEventDispatcher::endEvent(PipelineEvent::Type type, const std::string& source, std::optional queueSize) { // TODO add mutex + if(!sendEvents) return; checkNodeId(); if(blacklist(type, source)) return; @@ -107,6 +109,7 @@ void PipelineEventDispatcher::endCustomEvent(const std::string& source) { } void PipelineEventDispatcher::pingEvent(PipelineEvent::Type type, const std::string& source) { // TODO add mutex + if(!sendEvents) return; checkNodeId(); if(blacklist(type, source)) return; @@ -136,6 +139,7 @@ void PipelineEventDispatcher::pingCustomEvent(const std::string& source) { } void PipelineEventDispatcher::pingInputEvent(const std::string& source, int32_t status, std::optional queueSize) { // TODO add mutex + if(!sendEvents) return; checkNodeId(); if(blacklist(PipelineEvent::Type::INPUT, source)) return; From 0405afbbb460c6edafd204cbfea64de68c5b2e46 Mon Sep 17 00:00:00 2001 From: asahtik Date: Thu, 9 Oct 2025 12:30:24 +0200 Subject: [PATCH 29/40] Remove ongoing event warnings --- .../internal/PipelineEventAggregation.cpp | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/src/pipeline/node/internal/PipelineEventAggregation.cpp b/src/pipeline/node/internal/PipelineEventAggregation.cpp index 4b22f3a30..24a276340 100644 --- a/src/pipeline/node/internal/PipelineEventAggregation.cpp +++ b/src/pipeline/node/internal/PipelineEventAggregation.cpp @@ -132,11 +132,11 @@ class NodeEventAggregation { } else if(event.interval == PipelineEvent::Interval::START) { if(ongoingEvent.has_value()) { // TODO: add ability to wait for multiple events (nn hailo threaded processing time - events with custom ids for tracking) - logger->warn("Ongoing event (seq {}) not finished before new one (seq {}) started. Event source: {}, node {}", - ongoingEvent->sequenceNum, - event.sequenceNum, - ongoingEvent->source, - event.nodeId); + // logger->warn("Ongoing event (seq {}) not finished before new one (seq {}) started. Event source: {}, node {}", + // ongoingEvent->sequenceNum, + // event.sequenceNum, + // ongoingEvent->source, + // event.nodeId); } // Start event ongoingEvent = event; @@ -211,11 +211,11 @@ class NodeEventAggregation { return true; } else if(ongoingEvent.has_value()) { - logger->warn("Ongoing main loop event (seq {}) not finished before new one (seq {}) started. 
Event source: {}, node {}", - ongoingEvent->sequenceNum, - event.sequenceNum, - ongoingEvent->source, - event.nodeId); + // logger->warn("Ongoing main loop event (seq {}) not finished before new one (seq {}) started. Event source: {}, node {}", + // ongoingEvent->sequenceNum, + // event.sequenceNum, + // ongoingEvent->source, + // event.nodeId); } // Start event ongoingEvent = event; From af4d644bee45a31b9bdd07a7fc12fb4220153330 Mon Sep 17 00:00:00 2001 From: asahtik Date: Thu, 9 Oct 2025 12:38:10 +0200 Subject: [PATCH 30/40] RVC4 FW: core bump --- cmake/Depthai/DepthaiDeviceRVC4Config.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/Depthai/DepthaiDeviceRVC4Config.cmake b/cmake/Depthai/DepthaiDeviceRVC4Config.cmake index 1e26e9d08..c26383a21 100644 --- a/cmake/Depthai/DepthaiDeviceRVC4Config.cmake +++ b/cmake/Depthai/DepthaiDeviceRVC4Config.cmake @@ -3,4 +3,4 @@ set(DEPTHAI_DEVICE_RVC4_MATURITY "snapshot") # "version if applicable" -set(DEPTHAI_DEVICE_RVC4_VERSION "0.0.1+cd0fc231d8c860acf11ab424109d7e42500626ae") +set(DEPTHAI_DEVICE_RVC4_VERSION "0.0.1+51cca92012e8d745a2825d0157ea21d2c7e518aa") From 5024bc96904a88d69c41cdebf3113876973e8f96 Mon Sep 17 00:00:00 2001 From: asahtik Date: Thu, 9 Oct 2025 14:08:36 +0200 Subject: [PATCH 31/40] Fix getting pipeline state from device --- examples/cpp/HostNodes/image_manip_host.cpp | 3 +++ .../cpp/ObjectTracker/object_tracker_replay.cpp | 17 +++++++++++++++++ .../datatype/PipelineEventAggregationConfig.hpp | 2 +- .../depthai/pipeline/datatype/PipelineState.hpp | 1 - .../node/internal/PipelineStateMerge.cpp | 1 - 5 files changed, 21 insertions(+), 3 deletions(-) diff --git a/examples/cpp/HostNodes/image_manip_host.cpp b/examples/cpp/HostNodes/image_manip_host.cpp index 6bf7edafa..8688237e1 100644 --- a/examples/cpp/HostNodes/image_manip_host.cpp +++ b/examples/cpp/HostNodes/image_manip_host.cpp @@ -40,10 +40,13 @@ int main(int argc, char** argv) { pipeline.start(); + // TODO remove before merge while(pipeline.isRunning()) { std::this_thread::sleep_for(std::chrono::milliseconds(5000)); std::cout << "Pipeline state: " << pipeline.getPipelineState().nodes().detailed().str() << std::endl; } pipeline.stop(); + // + // pipeline.wait(); } diff --git a/examples/cpp/ObjectTracker/object_tracker_replay.cpp b/examples/cpp/ObjectTracker/object_tracker_replay.cpp index 7a5586826..6b44ae83c 100644 --- a/examples/cpp/ObjectTracker/object_tracker_replay.cpp +++ b/examples/cpp/ObjectTracker/object_tracker_replay.cpp @@ -37,16 +37,33 @@ int main() { // Start pipeline pipeline.start(); + // TODO remove before merge + nlohmann::json j; + j["pipeline"] = pipeline.getPipelineSchema(); + std::cout << "Pipeline schema: " << j.dump(2) << std::endl; + // + // FPS calculation variables auto startTime = std::chrono::steady_clock::now(); int counter = 0; float fps = 0; cv::Scalar color(255, 255, 255); + // TODO remove before merge + int index = 0; + // + while(pipeline.isRunning()) { auto imgFrame = preview->get(); auto track = tracklets->get(); + // TODO remove before merge + if(index++ % 30 == 0) { + std::cout << "----------------------------------------" << std::endl; + std::cout << "Pipeline state: " << pipeline.getPipelineState().nodes().detailed().str() << std::endl; + } + // + counter++; auto currentTime = std::chrono::steady_clock::now(); auto elapsed = std::chrono::duration_cast(currentTime - startTime).count(); diff --git a/include/depthai/pipeline/datatype/PipelineEventAggregationConfig.hpp 
b/include/depthai/pipeline/datatype/PipelineEventAggregationConfig.hpp index afa804b0c..56f844cd1 100644 --- a/include/depthai/pipeline/datatype/PipelineEventAggregationConfig.hpp +++ b/include/depthai/pipeline/datatype/PipelineEventAggregationConfig.hpp @@ -31,7 +31,7 @@ class PipelineEventAggregationConfig : public Buffer { void serialize(std::vector& metadata, DatatypeEnum& datatype) const override; - DEPTHAI_SERIALIZE(PipelineEventAggregationConfig, nodes, repeat); + DEPTHAI_SERIALIZE(PipelineEventAggregationConfig, Buffer::ts, Buffer::tsDevice, Buffer::sequenceNum, nodes, repeat); }; } // namespace dai diff --git a/include/depthai/pipeline/datatype/PipelineState.hpp b/include/depthai/pipeline/datatype/PipelineState.hpp index 658db2215..8e3228991 100644 --- a/include/depthai/pipeline/datatype/PipelineState.hpp +++ b/include/depthai/pipeline/datatype/PipelineState.hpp @@ -4,7 +4,6 @@ #include "depthai/pipeline/datatype/Buffer.hpp" #include "depthai/pipeline/datatype/PipelineEvent.hpp" -#include "depthai/common/optional.hpp" namespace dai { diff --git a/src/pipeline/node/internal/PipelineStateMerge.cpp b/src/pipeline/node/internal/PipelineStateMerge.cpp index c3d4f7a38..7e65ccd88 100644 --- a/src/pipeline/node/internal/PipelineStateMerge.cpp +++ b/src/pipeline/node/internal/PipelineStateMerge.cpp @@ -46,7 +46,6 @@ void PipelineStateMerge::run() { if(waitForMatch && deviceState != nullptr && currentConfig.has_value()) { while(isRunning() && deviceState->configSequenceNum != currentConfig->sequenceNum) { deviceState = inputDevice.get(); - if(!isRunning()) break; } } if(deviceState != nullptr) { From a5dbc4a81a0a6940fcb6eaeca0727a301a035269 Mon Sep 17 00:00:00 2001 From: asahtik Date: Thu, 9 Oct 2025 15:22:46 +0200 Subject: [PATCH 32/40] Bugfix, implement API for getting state (c++) --- .../node/PipelineEventAggregationBindings.cpp | 3 +- include/depthai/pipeline/Pipeline.hpp | 214 +++++++++++++----- .../pipeline/datatype/PipelineEvent.hpp | 2 +- .../PipelineEventAggregationConfig.hpp | 3 +- .../pipeline/datatype/PipelineState.hpp | 2 +- .../PipelineEventAggregationProperties.hpp | 3 +- .../internal/PipelineEventAggregation.cpp | 43 +++- 7 files changed, 199 insertions(+), 71 deletions(-) diff --git a/bindings/python/src/pipeline/node/PipelineEventAggregationBindings.cpp b/bindings/python/src/pipeline/node/PipelineEventAggregationBindings.cpp index 3fcf8a616..aa52d802a 100644 --- a/bindings/python/src/pipeline/node/PipelineEventAggregationBindings.cpp +++ b/bindings/python/src/pipeline/node/PipelineEventAggregationBindings.cpp @@ -26,8 +26,7 @@ void bind_pipelineeventaggregation(pybind11::module& m, void* pCallstack) { // // // Properties // pipelineEventAggregationProperties.def_readwrite("aggregationWindowSize", &PipelineEventAggregationProperties::aggregationWindowSize) - // .def_readwrite("eventBatchSize", &PipelineEventAggregationProperties::eventBatchSize) - // .def_readwrite("sendEvents", &PipelineEventAggregationProperties::sendEvents); + // .def_readwrite("eventBatchSize", &PipelineEventAggregationProperties::eventBatchSize); // // // Node // pipelineEventAggregation.def_readonly("out", &PipelineEventAggregation::out, DOC(dai, node, PipelineEventAggregation, out)) diff --git a/include/depthai/pipeline/Pipeline.hpp b/include/depthai/pipeline/Pipeline.hpp index 480729b7c..8780f894a 100644 --- a/include/depthai/pipeline/Pipeline.hpp +++ b/include/depthai/pipeline/Pipeline.hpp @@ -45,33 +45,34 @@ namespace fs = std::filesystem; * 
pipeline.getState().nodes({nodeId1}).otherStats({statName1}) -> std::unordered_map; * pipeline.getState().nodes({nodeId1}).outputs(statName) -> TimingStats; */ -template -class NodeStateApi {}; -template <> -class NodeStateApi> { +// TODO move this somewhere else +class NodesStateApi { std::vector nodeIds; std::shared_ptr pipelineStateOut; std::shared_ptr pipelineStateRequest; public: - explicit NodeStateApi(std::vector nodeIds, std::shared_ptr pipelineStateOut, std::shared_ptr pipelineStateRequest) + explicit NodesStateApi(std::vector nodeIds, std::shared_ptr pipelineStateOut, std::shared_ptr pipelineStateRequest) : nodeIds(std::move(nodeIds)), pipelineStateOut(pipelineStateOut), pipelineStateRequest(pipelineStateRequest) {} - std::unordered_map> summary() { + PipelineState summary() { PipelineEventAggregationConfig cfg; cfg.repeat = false; cfg.setTimestamp(std::chrono::steady_clock::now()); for(auto id : nodeIds) { NodeEventAggregationConfig nodeCfg; nodeCfg.nodeId = id; - nodeCfg.summary = true; + nodeCfg.events = false; + nodeCfg.inputs = {}; // Do not send any + nodeCfg.outputs = {}; // Do not send any + nodeCfg.others = {}; // Do not send any cfg.nodes.push_back(nodeCfg); } pipelineStateRequest->send(std::make_shared(cfg)); auto state = pipelineStateOut->get(); if(!state) throw std::runtime_error("Failed to get PipelineState"); - return {}; // TODO + return *state; } PipelineState detailed() { PipelineEventAggregationConfig cfg; @@ -80,10 +81,7 @@ class NodeStateApi> { for(auto id : nodeIds) { NodeEventAggregationConfig nodeCfg; nodeCfg.nodeId = id; - nodeCfg.summary = true; // contains main loop timing - nodeCfg.inputs = {}; // send all - nodeCfg.outputs = {}; // send all - nodeCfg.others = {}; // send all + nodeCfg.events = false; cfg.nodes.push_back(nodeCfg); } @@ -92,19 +90,27 @@ class NodeStateApi> { if(!state) throw std::runtime_error("Failed to get PipelineState"); return *state; } - std::unordered_map> outputs() { + std::unordered_map> outputs() { PipelineEventAggregationConfig cfg; cfg.repeat = false; cfg.setTimestamp(std::chrono::steady_clock::now()); for(auto id : nodeIds) { NodeEventAggregationConfig nodeCfg; nodeCfg.nodeId = id; - nodeCfg.outputs = {}; // send all + nodeCfg.inputs = {}; // Do not send any + nodeCfg.others = {}; // Do not send any + nodeCfg.events = false; cfg.nodes.push_back(nodeCfg); } - // TODO send and get - return {}; + pipelineStateRequest->send(std::make_shared(cfg)); + auto state = pipelineStateOut->get(); + if(!state) throw std::runtime_error("Failed to get PipelineState"); + std::unordered_map> result; + for(auto& [nodeId, nodeState] : state->nodeStates) { + result[nodeId] = nodeState.outputStates; + } + return result; } std::unordered_map> inputs() { PipelineEventAggregationConfig cfg; @@ -113,30 +119,45 @@ class NodeStateApi> { for(auto id : nodeIds) { NodeEventAggregationConfig nodeCfg; nodeCfg.nodeId = id; - nodeCfg.inputs = {}; // send all + nodeCfg.outputs = {}; // Do not send any + nodeCfg.others = {}; // Do not send any + nodeCfg.events = false; cfg.nodes.push_back(nodeCfg); } - // TODO send and get - return {}; + pipelineStateRequest->send(std::make_shared(cfg)); + auto state = pipelineStateOut->get(); + if(!state) throw std::runtime_error("Failed to get PipelineState"); + std::unordered_map> result; + for(auto& [nodeId, nodeState] : state->nodeStates) { + result[nodeId] = nodeState.inputStates; + } + return result; } - std::unordered_map> otherStats() { + std::unordered_map> otherTimings() { PipelineEventAggregationConfig cfg; 
cfg.repeat = false; cfg.setTimestamp(std::chrono::steady_clock::now()); for(auto id : nodeIds) { NodeEventAggregationConfig nodeCfg; nodeCfg.nodeId = id; - nodeCfg.others = {}; // send all + nodeCfg.inputs = {}; // Do not send any + nodeCfg.outputs = {}; // Do not send any + nodeCfg.events = false; cfg.nodes.push_back(nodeCfg); } - // TODO send and get - return {}; + pipelineStateRequest->send(std::make_shared(cfg)); + auto state = pipelineStateOut->get(); + if(!state) throw std::runtime_error("Failed to get PipelineState"); + std::unordered_map> result; + for(auto& [nodeId, nodeState] : state->nodeStates) { + result[nodeId] = nodeState.otherTimings; + } + return result; } }; -template <> -class NodeStateApi { +class NodeStateApi { Node::Id nodeId; std::shared_ptr pipelineStateOut; @@ -145,56 +166,87 @@ class NodeStateApi { public: explicit NodeStateApi(Node::Id nodeId, std::shared_ptr pipelineStateOut, std::shared_ptr pipelineStateRequest) : nodeId(nodeId), pipelineStateOut(pipelineStateOut), pipelineStateRequest(pipelineStateRequest) {} - std::unordered_map summary() { - return NodeStateApi>({nodeId}, pipelineStateOut, pipelineStateRequest).summary()[nodeId]; + NodeState summary() { + return NodesStateApi({nodeId}, pipelineStateOut, pipelineStateRequest).summary().nodeStates[nodeId]; } NodeState detailed() { - return NodeStateApi>({nodeId}, pipelineStateOut, pipelineStateRequest).detailed().nodeStates[nodeId]; + return NodesStateApi({nodeId}, pipelineStateOut, pipelineStateRequest).detailed().nodeStates[nodeId]; } - std::unordered_map outputs() { - return NodeStateApi>({nodeId}, pipelineStateOut, pipelineStateRequest).outputs()[nodeId]; + std::unordered_map outputs() { + return NodesStateApi({nodeId}, pipelineStateOut, pipelineStateRequest).outputs()[nodeId]; } std::unordered_map inputs() { - return NodeStateApi>({nodeId}, pipelineStateOut, pipelineStateRequest).inputs()[nodeId]; + return NodesStateApi({nodeId}, pipelineStateOut, pipelineStateRequest).inputs()[nodeId]; } - std::unordered_map otherStats() { - return NodeStateApi>({nodeId}, pipelineStateOut, pipelineStateRequest).otherStats()[nodeId]; + std::unordered_map otherTimings() { + return NodesStateApi({nodeId}, pipelineStateOut, pipelineStateRequest).otherTimings()[nodeId]; } - std::unordered_map outputs(const std::vector& outputNames) { + std::unordered_map outputs(const std::vector& outputNames) { PipelineEventAggregationConfig cfg; cfg.repeat = false; cfg.setTimestamp(std::chrono::steady_clock::now()); NodeEventAggregationConfig nodeCfg; nodeCfg.nodeId = nodeId; nodeCfg.outputs = outputNames; + nodeCfg.inputs = {}; // Do not send any + nodeCfg.others = {}; // Do not send any + nodeCfg.events = false; cfg.nodes.push_back(nodeCfg); - // TODO send and get - return {}; + pipelineStateRequest->send(std::make_shared(cfg)); + auto state = pipelineStateOut->get(); + if(!state) throw std::runtime_error("Failed to get PipelineState"); + if(state->nodeStates.find(nodeId) == state->nodeStates.end()) { + throw std::runtime_error("Node ID " + std::to_string(nodeId) + " not found in PipelineState"); + } + std::unordered_map result; + for(const auto& outputName : outputNames) { + result[outputName] = state->nodeStates[nodeId].outputStates[outputName]; + } + return result; } - NodeState::TimingStats outputs(const std::string& outputName) { + NodeState::OutputQueueState outputs(const std::string& outputName) { PipelineEventAggregationConfig cfg; cfg.repeat = false; cfg.setTimestamp(std::chrono::steady_clock::now()); NodeEventAggregationConfig 
nodeCfg; nodeCfg.nodeId = nodeId; nodeCfg.outputs = {outputName}; + nodeCfg.inputs = {}; // Do not send any + nodeCfg.others = {}; // Do not send any + nodeCfg.events = false; cfg.nodes.push_back(nodeCfg); - // TODO send and get - return {}; + pipelineStateRequest->send(std::make_shared(cfg)); + auto state = pipelineStateOut->get(); + if(!state) throw std::runtime_error("Failed to get PipelineState"); + if(state->nodeStates.find(nodeId) == state->nodeStates.end()) { + throw std::runtime_error("Node ID " + std::to_string(nodeId) + " not found in PipelineState"); + } + if(state->nodeStates[nodeId].outputStates.find(outputName) == state->nodeStates[nodeId].outputStates.end()) { + throw std::runtime_error("Output name " + outputName + " not found in NodeState for node ID " + std::to_string(nodeId)); + } + return state->nodeStates[nodeId].outputStates[outputName]; } - std::unordered_map> events() { + std::vector events() { PipelineEventAggregationConfig cfg; cfg.repeat = false; cfg.setTimestamp(std::chrono::steady_clock::now()); NodeEventAggregationConfig nodeCfg; nodeCfg.nodeId = nodeId; + nodeCfg.outputs = {}; // Do not send any + nodeCfg.inputs = {}; // Do not send any + nodeCfg.others = {}; // Do not send any nodeCfg.events = true; cfg.nodes.push_back(nodeCfg); - // TODO send and get - return {}; + pipelineStateRequest->send(std::make_shared(cfg)); + auto state = pipelineStateOut->get(); + if(!state) throw std::runtime_error("Failed to get PipelineState"); + if(state->nodeStates.find(nodeId) == state->nodeStates.end()) { + throw std::runtime_error("Node ID " + std::to_string(nodeId) + " not found in PipelineState"); + } + return state->nodeStates[nodeId].events; } std::unordered_map inputs(const std::vector& inputNames) { PipelineEventAggregationConfig cfg; @@ -203,10 +255,22 @@ class NodeStateApi { NodeEventAggregationConfig nodeCfg; nodeCfg.nodeId = nodeId; nodeCfg.inputs = inputNames; + nodeCfg.outputs = {}; // Do not send any + nodeCfg.others = {}; // Do not send any + nodeCfg.events = false; cfg.nodes.push_back(nodeCfg); - // TODO send and get - return {}; + pipelineStateRequest->send(std::make_shared(cfg)); + auto state = pipelineStateOut->get(); + if(!state) throw std::runtime_error("Failed to get PipelineState"); + if(state->nodeStates.find(nodeId) == state->nodeStates.end()) { + throw std::runtime_error("Node ID " + std::to_string(nodeId) + " not found in PipelineState"); + } + std::unordered_map result; + for(const auto& inputName : inputNames) { + result[inputName] = state->nodeStates[nodeId].inputStates[inputName]; + } + return result; } NodeState::InputQueueState inputs(const std::string& inputName) { PipelineEventAggregationConfig cfg; @@ -215,34 +279,68 @@ class NodeStateApi { NodeEventAggregationConfig nodeCfg; nodeCfg.nodeId = nodeId; nodeCfg.inputs = {inputName}; + nodeCfg.outputs = {}; // Do not send any + nodeCfg.others = {}; // Do not send any + nodeCfg.events = false; cfg.nodes.push_back(nodeCfg); - // TODO send and get - return {}; + pipelineStateRequest->send(std::make_shared(cfg)); + auto state = pipelineStateOut->get(); + if(!state) throw std::runtime_error("Failed to get PipelineState"); + if(state->nodeStates.find(nodeId) == state->nodeStates.end()) { + throw std::runtime_error("Node ID " + std::to_string(nodeId) + " not found in PipelineState"); + } + if(state->nodeStates[nodeId].inputStates.find(inputName) == state->nodeStates[nodeId].inputStates.end()) { + throw std::runtime_error("Input name " + inputName + " not found in NodeState for node ID " + 
std::to_string(nodeId)); + } + return state->nodeStates[nodeId].inputStates[inputName]; } - std::unordered_map otherStats(const std::vector& statNames) { + std::unordered_map otherTimings(const std::vector& statNames) { PipelineEventAggregationConfig cfg; cfg.repeat = false; cfg.setTimestamp(std::chrono::steady_clock::now()); NodeEventAggregationConfig nodeCfg; nodeCfg.nodeId = nodeId; nodeCfg.others = statNames; + nodeCfg.outputs = {}; // Do not send any + nodeCfg.inputs = {}; // Do not send any + nodeCfg.events = false; cfg.nodes.push_back(nodeCfg); - // TODO send and get - return {}; + pipelineStateRequest->send(std::make_shared(cfg)); + auto state = pipelineStateOut->get(); + if(!state) throw std::runtime_error("Failed to get PipelineState"); + if(state->nodeStates.find(nodeId) == state->nodeStates.end()) { + throw std::runtime_error("Node ID " + std::to_string(nodeId) + " not found in PipelineState"); + } + std::unordered_map result; + for(const auto& otherName : statNames) { + result[otherName] = state->nodeStates[nodeId].otherTimings[otherName]; + } + return result; } - NodeState::TimingStats otherStats(const std::string& statName) { + NodeState::Timing otherStats(const std::string& statName) { PipelineEventAggregationConfig cfg; cfg.repeat = false; cfg.setTimestamp(std::chrono::steady_clock::now()); NodeEventAggregationConfig nodeCfg; nodeCfg.nodeId = nodeId; nodeCfg.others = {statName}; + nodeCfg.outputs = {}; // Do not send any + nodeCfg.inputs = {}; // Do not send any + nodeCfg.events = false; cfg.nodes.push_back(nodeCfg); - // TODO send and get - return {}; + pipelineStateRequest->send(std::make_shared(cfg)); + auto state = pipelineStateOut->get(); + if(!state) throw std::runtime_error("Failed to get PipelineState"); + if(state->nodeStates.find(nodeId) == state->nodeStates.end()) { + throw std::runtime_error("Node ID " + std::to_string(nodeId) + " not found in PipelineState"); + } + if(state->nodeStates[nodeId].otherTimings.find(statName) == state->nodeStates[nodeId].otherTimings.end()) { + throw std::runtime_error("Stat name " + statName + " not found in NodeState for node ID " + std::to_string(nodeId)); + } + return state->nodeStates[nodeId].otherTimings[statName]; } }; class PipelineStateApi { @@ -259,14 +357,14 @@ class PipelineStateApi { nodeIds.push_back(n->id); } } - NodeStateApi> nodes() { - return NodeStateApi>(nodeIds, pipelineStateOut, pipelineStateRequest); + NodesStateApi nodes() { + return NodesStateApi(nodeIds, pipelineStateOut, pipelineStateRequest); } - NodeStateApi> nodes(const std::vector& nodeIds) { - return NodeStateApi>(nodeIds, pipelineStateOut, pipelineStateRequest); + NodesStateApi nodes(const std::vector& nodeIds) { + return NodesStateApi(nodeIds, pipelineStateOut, pipelineStateRequest); } - NodeStateApi nodes(Node::Id nodeId) { - return NodeStateApi(nodeId, pipelineStateOut, pipelineStateRequest); + NodeStateApi nodes(Node::Id nodeId) { + return NodeStateApi(nodeId, pipelineStateOut, pipelineStateRequest); } }; diff --git a/include/depthai/pipeline/datatype/PipelineEvent.hpp b/include/depthai/pipeline/datatype/PipelineEvent.hpp index 1ececed60..03293fa97 100644 --- a/include/depthai/pipeline/datatype/PipelineEvent.hpp +++ b/include/depthai/pipeline/datatype/PipelineEvent.hpp @@ -41,7 +41,7 @@ class PipelineEvent : public Buffer { datatype = DatatypeEnum::PipelineEvent; }; - DEPTHAI_SERIALIZE(PipelineEvent, Buffer::ts, Buffer::tsDevice, Buffer::sequenceNum, nodeId, status, interval, type, source); + DEPTHAI_SERIALIZE(PipelineEvent, Buffer::ts, 
Buffer::tsDevice, Buffer::sequenceNum, nodeId, status, queueSize, interval, type, source); }; } // namespace dai diff --git a/include/depthai/pipeline/datatype/PipelineEventAggregationConfig.hpp b/include/depthai/pipeline/datatype/PipelineEventAggregationConfig.hpp index 56f844cd1..e321bccea 100644 --- a/include/depthai/pipeline/datatype/PipelineEventAggregationConfig.hpp +++ b/include/depthai/pipeline/datatype/PipelineEventAggregationConfig.hpp @@ -14,10 +14,9 @@ class NodeEventAggregationConfig { std::optional> inputs; std::optional> outputs; std::optional> others; - bool summary = false; bool events = false; - DEPTHAI_SERIALIZE(NodeEventAggregationConfig, nodeId, inputs, outputs, others, summary, events); + DEPTHAI_SERIALIZE(NodeEventAggregationConfig, nodeId, inputs, outputs, others, events); }; /// PipelineEventAggregationConfig configuration structure diff --git a/include/depthai/pipeline/datatype/PipelineState.hpp b/include/depthai/pipeline/datatype/PipelineState.hpp index 8e3228991..261994571 100644 --- a/include/depthai/pipeline/datatype/PipelineState.hpp +++ b/include/depthai/pipeline/datatype/PipelineState.hpp @@ -87,7 +87,7 @@ class NodeState { // Other timings that the developer of the node decided to add std::unordered_map otherTimings; - DEPTHAI_SERIALIZE(NodeState, events, outputStates, inputStates, inputsGetTiming, outputsSendTiming, mainLoopTiming, otherTimings); + DEPTHAI_SERIALIZE(NodeState, state, events, outputStates, inputStates, inputsGetTiming, outputsSendTiming, mainLoopTiming, otherTimings); }; /** diff --git a/include/depthai/properties/internal/PipelineEventAggregationProperties.hpp b/include/depthai/properties/internal/PipelineEventAggregationProperties.hpp index b5195a75f..04fd9c940 100644 --- a/include/depthai/properties/internal/PipelineEventAggregationProperties.hpp +++ b/include/depthai/properties/internal/PipelineEventAggregationProperties.hpp @@ -10,9 +10,8 @@ namespace dai { struct PipelineEventAggregationProperties : PropertiesSerializable { uint32_t aggregationWindowSize = 100; uint32_t eventBatchSize = 50; - bool sendEvents = false; }; -DEPTHAI_SERIALIZE_EXT(PipelineEventAggregationProperties, aggregationWindowSize, eventBatchSize, sendEvents); +DEPTHAI_SERIALIZE_EXT(PipelineEventAggregationProperties, aggregationWindowSize, eventBatchSize); } // namespace dai diff --git a/src/pipeline/node/internal/PipelineEventAggregation.cpp b/src/pipeline/node/internal/PipelineEventAggregation.cpp index 24a276340..2bd4f0701 100644 --- a/src/pipeline/node/internal/PipelineEventAggregation.cpp +++ b/src/pipeline/node/internal/PipelineEventAggregation.cpp @@ -454,15 +454,48 @@ void PipelineEventAggregation::run() { } if(gotConfig || (currentConfig.has_value() && currentConfig->repeat)) { bool updated = handler.getState(outState); - for(auto& [nodeId, nodeState] : outState->nodeStates) { - if(!properties.sendEvents) nodeState.events.clear(); - } - outState->sequenceNum = sequenceNum++; outState->configSequenceNum = currentConfig.has_value() ? 
currentConfig->sequenceNum : 0; outState->setTimestamp(std::chrono::steady_clock::now()); outState->tsDevice = outState->ts; - // TODO: send only requested data + + for(auto it = outState->nodeStates.begin(); it != outState->nodeStates.end();) { + auto nodeConfig = std::find_if( + currentConfig->nodes.begin(), currentConfig->nodes.end(), [&](const NodeEventAggregationConfig& cfg) { return cfg.nodeId == it->first; }); + if(nodeConfig == currentConfig->nodes.end()) { + it = outState->nodeStates.erase(it); + } else { + if(nodeConfig->inputs.has_value()) { + auto inputStates = it->second.inputStates; + it->second.inputStates.clear(); + for(const auto& inputName : *nodeConfig->inputs) { + if(inputStates.find(inputName) != inputStates.end()) { + it->second.inputStates[inputName] = inputStates[inputName]; + } + } + } + if(nodeConfig->outputs.has_value()) { + auto outputStates = it->second.outputStates; + it->second.outputStates.clear(); + for(const auto& outputName : *nodeConfig->outputs) { + if(outputStates.find(outputName) != outputStates.end()) { + it->second.outputStates[outputName] = outputStates[outputName]; + } + } + } + if(nodeConfig->others.has_value()) { + auto otherTimings = it->second.otherTimings; + it->second.otherTimings.clear(); + for(const auto& otherName : *nodeConfig->others) { + if(otherTimings.find(otherName) != otherTimings.end()) { + it->second.otherTimings[otherName] = otherTimings[otherName]; + } + } + } + if(!nodeConfig->events) it->second.events.clear(); + ++it; + } + } if(gotConfig || (currentConfig.has_value() && currentConfig->repeat && updated)) out.send(outState); } std::this_thread::sleep_for(std::chrono::milliseconds(10)); From 89a168fd7e883fff70df628f7c3151dcd74220fa Mon Sep 17 00:00:00 2001 From: asahtik Date: Thu, 9 Oct 2025 15:27:01 +0200 Subject: [PATCH 33/40] Fix pipeline state filtering --- .../internal/PipelineEventAggregation.cpp | 59 ++++++++++--------- 1 file changed, 31 insertions(+), 28 deletions(-) diff --git a/src/pipeline/node/internal/PipelineEventAggregation.cpp b/src/pipeline/node/internal/PipelineEventAggregation.cpp index 2bd4f0701..dde5fc920 100644 --- a/src/pipeline/node/internal/PipelineEventAggregation.cpp +++ b/src/pipeline/node/internal/PipelineEventAggregation.cpp @@ -459,41 +459,44 @@ void PipelineEventAggregation::run() { outState->setTimestamp(std::chrono::steady_clock::now()); outState->tsDevice = outState->ts; - for(auto it = outState->nodeStates.begin(); it != outState->nodeStates.end();) { - auto nodeConfig = std::find_if( - currentConfig->nodes.begin(), currentConfig->nodes.end(), [&](const NodeEventAggregationConfig& cfg) { return cfg.nodeId == it->first; }); - if(nodeConfig == currentConfig->nodes.end()) { - it = outState->nodeStates.erase(it); - } else { - if(nodeConfig->inputs.has_value()) { - auto inputStates = it->second.inputStates; - it->second.inputStates.clear(); - for(const auto& inputName : *nodeConfig->inputs) { - if(inputStates.find(inputName) != inputStates.end()) { - it->second.inputStates[inputName] = inputStates[inputName]; + if(!currentConfig->nodes.empty()) { + for(auto it = outState->nodeStates.begin(); it != outState->nodeStates.end();) { + auto nodeConfig = std::find_if(currentConfig->nodes.begin(), currentConfig->nodes.end(), [&](const NodeEventAggregationConfig& cfg) { + return cfg.nodeId == it->first; + }); + if(nodeConfig == currentConfig->nodes.end()) { + it = outState->nodeStates.erase(it); + } else { + if(nodeConfig->inputs.has_value()) { + auto inputStates = it->second.inputStates; + 
it->second.inputStates.clear(); + for(const auto& inputName : *nodeConfig->inputs) { + if(inputStates.find(inputName) != inputStates.end()) { + it->second.inputStates[inputName] = inputStates[inputName]; + } } } - } - if(nodeConfig->outputs.has_value()) { - auto outputStates = it->second.outputStates; - it->second.outputStates.clear(); - for(const auto& outputName : *nodeConfig->outputs) { - if(outputStates.find(outputName) != outputStates.end()) { - it->second.outputStates[outputName] = outputStates[outputName]; + if(nodeConfig->outputs.has_value()) { + auto outputStates = it->second.outputStates; + it->second.outputStates.clear(); + for(const auto& outputName : *nodeConfig->outputs) { + if(outputStates.find(outputName) != outputStates.end()) { + it->second.outputStates[outputName] = outputStates[outputName]; + } } } - } - if(nodeConfig->others.has_value()) { - auto otherTimings = it->second.otherTimings; - it->second.otherTimings.clear(); - for(const auto& otherName : *nodeConfig->others) { - if(otherTimings.find(otherName) != otherTimings.end()) { - it->second.otherTimings[otherName] = otherTimings[otherName]; + if(nodeConfig->others.has_value()) { + auto otherTimings = it->second.otherTimings; + it->second.otherTimings.clear(); + for(const auto& otherName : *nodeConfig->others) { + if(otherTimings.find(otherName) != otherTimings.end()) { + it->second.otherTimings[otherName] = otherTimings[otherName]; + } } } + if(!nodeConfig->events) it->second.events.clear(); + ++it; } - if(!nodeConfig->events) it->second.events.clear(); - ++it; } } if(gotConfig || (currentConfig.has_value() && currentConfig->repeat && updated)) out.send(outState); From c5befed2ce627820e04bea62caa4b71fa8e5d726 Mon Sep 17 00:00:00 2001 From: asahtik Date: Thu, 9 Oct 2025 16:12:01 +0200 Subject: [PATCH 34/40] RVC4 FW: time main loop of some nodes --- cmake/Depthai/DepthaiDeviceRVC4Config.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/Depthai/DepthaiDeviceRVC4Config.cmake b/cmake/Depthai/DepthaiDeviceRVC4Config.cmake index c26383a21..380cdd5e9 100644 --- a/cmake/Depthai/DepthaiDeviceRVC4Config.cmake +++ b/cmake/Depthai/DepthaiDeviceRVC4Config.cmake @@ -3,4 +3,4 @@ set(DEPTHAI_DEVICE_RVC4_MATURITY "snapshot") # "version if applicable" -set(DEPTHAI_DEVICE_RVC4_VERSION "0.0.1+51cca92012e8d745a2825d0157ea21d2c7e518aa") +set(DEPTHAI_DEVICE_RVC4_VERSION "0.0.1+e19591198e276d1580a98f77b3387c1abc881042") From c79abbf237775aaa4358f3b4b857bf1b0c1c3e04 Mon Sep 17 00:00:00 2001 From: asahtik Date: Fri, 10 Oct 2025 10:07:10 +0200 Subject: [PATCH 35/40] Add python bindings for getting pipeline state --- CMakeLists.txt | 1 + .../python/src/pipeline/PipelineBindings.cpp | 56 +++ include/depthai/pipeline/Pipeline.hpp | 339 +----------------- 3 files changed, 58 insertions(+), 338 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 237f2c5f7..ff4122a05 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -273,6 +273,7 @@ set(TARGET_CORE_SOURCES src/device/CalibrationHandler.cpp src/device/Version.cpp src/pipeline/Pipeline.cpp + src/pipeline/PipelineStateApi.cpp src/pipeline/AssetManager.cpp src/pipeline/MessageQueue.cpp src/pipeline/Node.cpp diff --git a/bindings/python/src/pipeline/PipelineBindings.cpp b/bindings/python/src/pipeline/PipelineBindings.cpp index 02d10b3bd..f2b1e6ee8 100644 --- a/bindings/python/src/pipeline/PipelineBindings.cpp +++ b/bindings/python/src/pipeline/PipelineBindings.cpp @@ -64,6 +64,9 @@ void PipelineBindings::bind(pybind11::module& m, void* pCallstack) { py::class_ 
globalProperties(m, "GlobalProperties", DOC(dai, GlobalProperties)); py::class_ recordConfig(m, "RecordConfig", DOC(dai, RecordConfig)); py::class_ recordVideoConfig(recordConfig, "VideoEncoding", DOC(dai, RecordConfig, VideoEncoding)); + py::class_ pipelineStateApi(m, "PipelineStateApi", DOC(dai, PipelineStateApi)); + py::class_ nodesStateApi(m, "NodesStateApi", DOC(dai, NodesStateApi)); + py::class_ nodeStateApi(m, "NodeStateApi", DOC(dai, NodeStateApi)); py::class_ pipeline(m, "Pipeline", DOC(dai, Pipeline, 2)); /////////////////////////////////////////////////////////////////////// @@ -102,6 +105,59 @@ void PipelineBindings::bind(pybind11::module& m, void* pCallstack) { .def_readwrite("videoEncoding", &RecordConfig::videoEncoding, DOC(dai, RecordConfig, videoEncoding)) .def_readwrite("compressionLevel", &RecordConfig::compressionLevel, DOC(dai, RecordConfig, compressionLevel)); + pipelineStateApi.def("nodes", static_cast(&PipelineStateApi::nodes), DOC(dai, PipelineStateApi, nodes)) + .def("nodes", + static_cast&)>(&PipelineStateApi::nodes), + py::arg("nodeIds"), + DOC(dai, PipelineStateApi, nodes, 2)) + .def("nodes", + static_cast(&PipelineStateApi::nodes), + py::arg("nodeId"), + DOC(dai, PipelineStateApi, nodes, 3)); + + nodesStateApi.def("summary", &NodesStateApi::summary, DOC(dai, NodesStateApi, summary)) + .def("detailed", &NodesStateApi::detailed, DOC(dai, NodesStateApi, detailed)) + .def("outputs", &NodesStateApi::outputs, DOC(dai, NodesStateApi, outputs)) + .def("inputs", &NodesStateApi::inputs, DOC(dai, NodesStateApi, inputs)) + .def("otherTimings", &NodesStateApi::otherTimings, DOC(dai, NodesStateApi, otherTimings)); + + nodeStateApi.def("summary", &NodeStateApi::summary, DOC(dai, NodeStateApi, summary)) + .def("detailed", &NodeStateApi::detailed, DOC(dai, NodeStateApi, detailed)) + .def("outputs", + static_cast (NodeStateApi::*)()>(&NodeStateApi::outputs), + DOC(dai, NodeStateApi, outputs)) + .def("outputs", + static_cast (NodeStateApi::*)(const std::vector&)>( + &NodeStateApi::outputs), + py::arg("outputNames"), + DOC(dai, NodeStateApi, outputs, 2)) + .def("outputs", + static_cast(&NodeStateApi::outputs), + py::arg("outputName"), + DOC(dai, NodeStateApi, outputs, 3)) + .def("inputs", + static_cast (NodeStateApi::*)()>(&NodeStateApi::inputs), + DOC(dai, NodeStateApi, inputs)) + .def("inputs", + static_cast (NodeStateApi::*)(const std::vector&)>(&NodeStateApi::inputs), + py::arg("inputNames"), + DOC(dai, NodeStateApi, inputs, 2)) + .def("inputs", + static_cast(&NodeStateApi::inputs), + py::arg("inputName"), + DOC(dai, NodeStateApi, inputs, 3)) + .def("otherTimings", + static_cast (NodeStateApi::*)()>(&NodeStateApi::otherTimings), + DOC(dai, NodeStateApi, otherTimings)) + .def("otherTimings", + static_cast (NodeStateApi::*)(const std::vector&)>(&NodeStateApi::otherTimings), + py::arg("statNames"), + DOC(dai, NodeStateApi, otherTimings, 2)) + .def("otherStats", + static_cast(&NodeStateApi::otherStats), + py::arg("statName"), + DOC(dai, NodeStateApi, otherStats)); + // bind pipeline pipeline .def(py::init([](bool createImplicitDevice) { diff --git a/include/depthai/pipeline/Pipeline.hpp b/include/depthai/pipeline/Pipeline.hpp index 8780f894a..4991b770a 100644 --- a/include/depthai/pipeline/Pipeline.hpp +++ b/include/depthai/pipeline/Pipeline.hpp @@ -12,6 +12,7 @@ #include "AssetManager.hpp" #include "DeviceNode.hpp" #include "Node.hpp" +#include "PipelineStateApi.hpp" #include "depthai/device/CalibrationHandler.hpp" #include "depthai/device/Device.hpp" #include 
"depthai/openvino/OpenVINO.hpp" @@ -30,344 +31,6 @@ namespace dai { namespace fs = std::filesystem; -/** - * pipeline.getState().nodes({nodeId1}).summary() -> std::unordered_map; - * pipeline.getState().nodes({nodeId1}).detailed() -> std::unordered_map; - * pipeline.getState().nodes(nodeId1).detailed() -> NodeState; - * pipeline.getState().nodes({nodeId1}).outputs() -> std::unordered_map; - * pipeline.getState().nodes({nodeId1}).outputs({outputName1}) -> std::unordered_map; - * pipeline.getState().nodes({nodeId1}).outputs(outputName) -> TimingStats; - * pipeline.getState().nodes({nodeId1}).events(); - * pipeline.getState().nodes({nodeId1}).inputs() -> std::unordered_map; - * pipeline.getState().nodes({nodeId1}).inputs({inputName1}) -> std::unordered_map; - * pipeline.getState().nodes({nodeId1}).inputs(inputName) -> QueueState; - * pipeline.getState().nodes({nodeId1}).otherStats() -> std::unordered_map; - * pipeline.getState().nodes({nodeId1}).otherStats({statName1}) -> std::unordered_map; - * pipeline.getState().nodes({nodeId1}).outputs(statName) -> TimingStats; - */ -// TODO move this somewhere else -class NodesStateApi { - std::vector nodeIds; - - std::shared_ptr pipelineStateOut; - std::shared_ptr pipelineStateRequest; - - public: - explicit NodesStateApi(std::vector nodeIds, std::shared_ptr pipelineStateOut, std::shared_ptr pipelineStateRequest) - : nodeIds(std::move(nodeIds)), pipelineStateOut(pipelineStateOut), pipelineStateRequest(pipelineStateRequest) {} - PipelineState summary() { - PipelineEventAggregationConfig cfg; - cfg.repeat = false; - cfg.setTimestamp(std::chrono::steady_clock::now()); - for(auto id : nodeIds) { - NodeEventAggregationConfig nodeCfg; - nodeCfg.nodeId = id; - nodeCfg.events = false; - nodeCfg.inputs = {}; // Do not send any - nodeCfg.outputs = {}; // Do not send any - nodeCfg.others = {}; // Do not send any - cfg.nodes.push_back(nodeCfg); - } - - pipelineStateRequest->send(std::make_shared(cfg)); - auto state = pipelineStateOut->get(); - if(!state) throw std::runtime_error("Failed to get PipelineState"); - return *state; - } - PipelineState detailed() { - PipelineEventAggregationConfig cfg; - cfg.repeat = false; - cfg.setTimestamp(std::chrono::steady_clock::now()); - for(auto id : nodeIds) { - NodeEventAggregationConfig nodeCfg; - nodeCfg.nodeId = id; - nodeCfg.events = false; - cfg.nodes.push_back(nodeCfg); - } - - pipelineStateRequest->send(std::make_shared(cfg)); - auto state = pipelineStateOut->get(); - if(!state) throw std::runtime_error("Failed to get PipelineState"); - return *state; - } - std::unordered_map> outputs() { - PipelineEventAggregationConfig cfg; - cfg.repeat = false; - cfg.setTimestamp(std::chrono::steady_clock::now()); - for(auto id : nodeIds) { - NodeEventAggregationConfig nodeCfg; - nodeCfg.nodeId = id; - nodeCfg.inputs = {}; // Do not send any - nodeCfg.others = {}; // Do not send any - nodeCfg.events = false; - cfg.nodes.push_back(nodeCfg); - } - - pipelineStateRequest->send(std::make_shared(cfg)); - auto state = pipelineStateOut->get(); - if(!state) throw std::runtime_error("Failed to get PipelineState"); - std::unordered_map> result; - for(auto& [nodeId, nodeState] : state->nodeStates) { - result[nodeId] = nodeState.outputStates; - } - return result; - } - std::unordered_map> inputs() { - PipelineEventAggregationConfig cfg; - cfg.repeat = false; - cfg.setTimestamp(std::chrono::steady_clock::now()); - for(auto id : nodeIds) { - NodeEventAggregationConfig nodeCfg; - nodeCfg.nodeId = id; - nodeCfg.outputs = {}; // Do not send any - 
nodeCfg.others = {}; // Do not send any - nodeCfg.events = false; - cfg.nodes.push_back(nodeCfg); - } - - pipelineStateRequest->send(std::make_shared(cfg)); - auto state = pipelineStateOut->get(); - if(!state) throw std::runtime_error("Failed to get PipelineState"); - std::unordered_map> result; - for(auto& [nodeId, nodeState] : state->nodeStates) { - result[nodeId] = nodeState.inputStates; - } - return result; - } - std::unordered_map> otherTimings() { - PipelineEventAggregationConfig cfg; - cfg.repeat = false; - cfg.setTimestamp(std::chrono::steady_clock::now()); - for(auto id : nodeIds) { - NodeEventAggregationConfig nodeCfg; - nodeCfg.nodeId = id; - nodeCfg.inputs = {}; // Do not send any - nodeCfg.outputs = {}; // Do not send any - nodeCfg.events = false; - cfg.nodes.push_back(nodeCfg); - } - - pipelineStateRequest->send(std::make_shared(cfg)); - auto state = pipelineStateOut->get(); - if(!state) throw std::runtime_error("Failed to get PipelineState"); - std::unordered_map> result; - for(auto& [nodeId, nodeState] : state->nodeStates) { - result[nodeId] = nodeState.otherTimings; - } - return result; - } -}; -class NodeStateApi { - Node::Id nodeId; - - std::shared_ptr pipelineStateOut; - std::shared_ptr pipelineStateRequest; - - public: - explicit NodeStateApi(Node::Id nodeId, std::shared_ptr pipelineStateOut, std::shared_ptr pipelineStateRequest) - : nodeId(nodeId), pipelineStateOut(pipelineStateOut), pipelineStateRequest(pipelineStateRequest) {} - NodeState summary() { - return NodesStateApi({nodeId}, pipelineStateOut, pipelineStateRequest).summary().nodeStates[nodeId]; - } - NodeState detailed() { - return NodesStateApi({nodeId}, pipelineStateOut, pipelineStateRequest).detailed().nodeStates[nodeId]; - } - std::unordered_map outputs() { - return NodesStateApi({nodeId}, pipelineStateOut, pipelineStateRequest).outputs()[nodeId]; - } - std::unordered_map inputs() { - return NodesStateApi({nodeId}, pipelineStateOut, pipelineStateRequest).inputs()[nodeId]; - } - std::unordered_map otherTimings() { - return NodesStateApi({nodeId}, pipelineStateOut, pipelineStateRequest).otherTimings()[nodeId]; - } - std::unordered_map outputs(const std::vector& outputNames) { - PipelineEventAggregationConfig cfg; - cfg.repeat = false; - cfg.setTimestamp(std::chrono::steady_clock::now()); - NodeEventAggregationConfig nodeCfg; - nodeCfg.nodeId = nodeId; - nodeCfg.outputs = outputNames; - nodeCfg.inputs = {}; // Do not send any - nodeCfg.others = {}; // Do not send any - nodeCfg.events = false; - cfg.nodes.push_back(nodeCfg); - - pipelineStateRequest->send(std::make_shared(cfg)); - auto state = pipelineStateOut->get(); - if(!state) throw std::runtime_error("Failed to get PipelineState"); - if(state->nodeStates.find(nodeId) == state->nodeStates.end()) { - throw std::runtime_error("Node ID " + std::to_string(nodeId) + " not found in PipelineState"); - } - std::unordered_map result; - for(const auto& outputName : outputNames) { - result[outputName] = state->nodeStates[nodeId].outputStates[outputName]; - } - return result; - } - NodeState::OutputQueueState outputs(const std::string& outputName) { - PipelineEventAggregationConfig cfg; - cfg.repeat = false; - cfg.setTimestamp(std::chrono::steady_clock::now()); - NodeEventAggregationConfig nodeCfg; - nodeCfg.nodeId = nodeId; - nodeCfg.outputs = {outputName}; - nodeCfg.inputs = {}; // Do not send any - nodeCfg.others = {}; // Do not send any - nodeCfg.events = false; - cfg.nodes.push_back(nodeCfg); - - pipelineStateRequest->send(std::make_shared(cfg)); - auto state = 
pipelineStateOut->get(); - if(!state) throw std::runtime_error("Failed to get PipelineState"); - if(state->nodeStates.find(nodeId) == state->nodeStates.end()) { - throw std::runtime_error("Node ID " + std::to_string(nodeId) + " not found in PipelineState"); - } - if(state->nodeStates[nodeId].outputStates.find(outputName) == state->nodeStates[nodeId].outputStates.end()) { - throw std::runtime_error("Output name " + outputName + " not found in NodeState for node ID " + std::to_string(nodeId)); - } - return state->nodeStates[nodeId].outputStates[outputName]; - } - std::vector events() { - PipelineEventAggregationConfig cfg; - cfg.repeat = false; - cfg.setTimestamp(std::chrono::steady_clock::now()); - NodeEventAggregationConfig nodeCfg; - nodeCfg.nodeId = nodeId; - nodeCfg.outputs = {}; // Do not send any - nodeCfg.inputs = {}; // Do not send any - nodeCfg.others = {}; // Do not send any - nodeCfg.events = true; - cfg.nodes.push_back(nodeCfg); - - pipelineStateRequest->send(std::make_shared(cfg)); - auto state = pipelineStateOut->get(); - if(!state) throw std::runtime_error("Failed to get PipelineState"); - if(state->nodeStates.find(nodeId) == state->nodeStates.end()) { - throw std::runtime_error("Node ID " + std::to_string(nodeId) + " not found in PipelineState"); - } - return state->nodeStates[nodeId].events; - } - std::unordered_map inputs(const std::vector& inputNames) { - PipelineEventAggregationConfig cfg; - cfg.repeat = false; - cfg.setTimestamp(std::chrono::steady_clock::now()); - NodeEventAggregationConfig nodeCfg; - nodeCfg.nodeId = nodeId; - nodeCfg.inputs = inputNames; - nodeCfg.outputs = {}; // Do not send any - nodeCfg.others = {}; // Do not send any - nodeCfg.events = false; - cfg.nodes.push_back(nodeCfg); - - pipelineStateRequest->send(std::make_shared(cfg)); - auto state = pipelineStateOut->get(); - if(!state) throw std::runtime_error("Failed to get PipelineState"); - if(state->nodeStates.find(nodeId) == state->nodeStates.end()) { - throw std::runtime_error("Node ID " + std::to_string(nodeId) + " not found in PipelineState"); - } - std::unordered_map result; - for(const auto& inputName : inputNames) { - result[inputName] = state->nodeStates[nodeId].inputStates[inputName]; - } - return result; - } - NodeState::InputQueueState inputs(const std::string& inputName) { - PipelineEventAggregationConfig cfg; - cfg.repeat = false; - cfg.setTimestamp(std::chrono::steady_clock::now()); - NodeEventAggregationConfig nodeCfg; - nodeCfg.nodeId = nodeId; - nodeCfg.inputs = {inputName}; - nodeCfg.outputs = {}; // Do not send any - nodeCfg.others = {}; // Do not send any - nodeCfg.events = false; - cfg.nodes.push_back(nodeCfg); - - pipelineStateRequest->send(std::make_shared(cfg)); - auto state = pipelineStateOut->get(); - if(!state) throw std::runtime_error("Failed to get PipelineState"); - if(state->nodeStates.find(nodeId) == state->nodeStates.end()) { - throw std::runtime_error("Node ID " + std::to_string(nodeId) + " not found in PipelineState"); - } - if(state->nodeStates[nodeId].inputStates.find(inputName) == state->nodeStates[nodeId].inputStates.end()) { - throw std::runtime_error("Input name " + inputName + " not found in NodeState for node ID " + std::to_string(nodeId)); - } - return state->nodeStates[nodeId].inputStates[inputName]; - } - std::unordered_map otherTimings(const std::vector& statNames) { - PipelineEventAggregationConfig cfg; - cfg.repeat = false; - cfg.setTimestamp(std::chrono::steady_clock::now()); - NodeEventAggregationConfig nodeCfg; - nodeCfg.nodeId = nodeId; - 
nodeCfg.others = statNames; - nodeCfg.outputs = {}; // Do not send any - nodeCfg.inputs = {}; // Do not send any - nodeCfg.events = false; - cfg.nodes.push_back(nodeCfg); - - pipelineStateRequest->send(std::make_shared(cfg)); - auto state = pipelineStateOut->get(); - if(!state) throw std::runtime_error("Failed to get PipelineState"); - if(state->nodeStates.find(nodeId) == state->nodeStates.end()) { - throw std::runtime_error("Node ID " + std::to_string(nodeId) + " not found in PipelineState"); - } - std::unordered_map result; - for(const auto& otherName : statNames) { - result[otherName] = state->nodeStates[nodeId].otherTimings[otherName]; - } - return result; - } - NodeState::Timing otherStats(const std::string& statName) { - PipelineEventAggregationConfig cfg; - cfg.repeat = false; - cfg.setTimestamp(std::chrono::steady_clock::now()); - NodeEventAggregationConfig nodeCfg; - nodeCfg.nodeId = nodeId; - nodeCfg.others = {statName}; - nodeCfg.outputs = {}; // Do not send any - nodeCfg.inputs = {}; // Do not send any - nodeCfg.events = false; - cfg.nodes.push_back(nodeCfg); - - pipelineStateRequest->send(std::make_shared(cfg)); - auto state = pipelineStateOut->get(); - if(!state) throw std::runtime_error("Failed to get PipelineState"); - if(state->nodeStates.find(nodeId) == state->nodeStates.end()) { - throw std::runtime_error("Node ID " + std::to_string(nodeId) + " not found in PipelineState"); - } - if(state->nodeStates[nodeId].otherTimings.find(statName) == state->nodeStates[nodeId].otherTimings.end()) { - throw std::runtime_error("Stat name " + statName + " not found in NodeState for node ID " + std::to_string(nodeId)); - } - return state->nodeStates[nodeId].otherTimings[statName]; - } -}; -class PipelineStateApi { - std::shared_ptr pipelineStateOut; - std::shared_ptr pipelineStateRequest; - std::vector nodeIds; // empty means all nodes - - public: - PipelineStateApi(std::shared_ptr pipelineStateOut, - std::shared_ptr pipelineStateRequest, - const std::vector>& allNodes) - : pipelineStateOut(std::move(pipelineStateOut)), pipelineStateRequest(std::move(pipelineStateRequest)) { - for(const auto& n : allNodes) { - nodeIds.push_back(n->id); - } - } - NodesStateApi nodes() { - return NodesStateApi(nodeIds, pipelineStateOut, pipelineStateRequest); - } - NodesStateApi nodes(const std::vector& nodeIds) { - return NodesStateApi(nodeIds, pipelineStateOut, pipelineStateRequest); - } - NodeStateApi nodes(Node::Id nodeId) { - return NodeStateApi(nodeId, pipelineStateOut, pipelineStateRequest); - } -}; - class PipelineImpl : public std::enable_shared_from_this { friend class Pipeline; friend class Node; From df8011bbf3e3e8203fe89c6188f8074fbf3f3760 Mon Sep 17 00:00:00 2001 From: asahtik Date: Fri, 10 Oct 2025 13:35:38 +0200 Subject: [PATCH 36/40] Implement tracking events through threads --- .../datatype/PipelineStateBindings.cpp | 3 +- .../pipeline/datatype/PipelineState.hpp | 3 +- .../PipelineEventAggregationProperties.hpp | 3 +- include/depthai/utility/CircularBuffer.hpp | 108 ++++++++++++++++- .../utility/PipelineEventDispatcher.hpp | 1 + .../PipelineEventDispatcherInterface.hpp | 2 + .../internal/PipelineEventAggregation.cpp | 109 ++++++++++-------- src/utility/PipelineEventDispatcher.cpp | 20 ++++ 8 files changed, 195 insertions(+), 54 deletions(-) diff --git a/bindings/python/src/pipeline/datatype/PipelineStateBindings.cpp b/bindings/python/src/pipeline/datatype/PipelineStateBindings.cpp index 0a22611f4..21e560891 100644 --- a/bindings/python/src/pipeline/datatype/PipelineStateBindings.cpp +++ 
b/bindings/python/src/pipeline/datatype/PipelineStateBindings.cpp @@ -52,8 +52,7 @@ void bind_pipelinestate(pybind11::module& m, void* pCallstack) { durationEvent.def(py::init<>()) .def("__repr__", &NodeState::DurationEvent::str) .def_readwrite("startEvent", &NodeState::DurationEvent::startEvent, DOC(dai, NodeState, DurationEvent, startEvent)) - .def_readwrite("durationUs", &NodeState::DurationEvent::durationUs, DOC(dai, NodeState, TimingStats, durationUs)) - .def_readwrite("fps", &NodeState::DurationEvent::fps, DOC(dai, NodeState, TimingStats, fps)); + .def_readwrite("durationUs", &NodeState::DurationEvent::durationUs, DOC(dai, NodeState, TimingStats, durationUs)); nodeStateTimingStats.def(py::init<>()) .def("__repr__", &NodeState::TimingStats::str) diff --git a/include/depthai/pipeline/datatype/PipelineState.hpp b/include/depthai/pipeline/datatype/PipelineState.hpp index 261994571..ea73c38a5 100644 --- a/include/depthai/pipeline/datatype/PipelineState.hpp +++ b/include/depthai/pipeline/datatype/PipelineState.hpp @@ -12,8 +12,7 @@ class NodeState { struct DurationEvent { PipelineEvent startEvent; uint64_t durationUs; - float fps; - DEPTHAI_SERIALIZE(DurationEvent, startEvent, durationUs, fps); + DEPTHAI_SERIALIZE(DurationEvent, startEvent, durationUs); }; struct TimingStats { uint64_t minMicros = -1; diff --git a/include/depthai/properties/internal/PipelineEventAggregationProperties.hpp b/include/depthai/properties/internal/PipelineEventAggregationProperties.hpp index 04fd9c940..013dd29bf 100644 --- a/include/depthai/properties/internal/PipelineEventAggregationProperties.hpp +++ b/include/depthai/properties/internal/PipelineEventAggregationProperties.hpp @@ -10,8 +10,9 @@ namespace dai { struct PipelineEventAggregationProperties : PropertiesSerializable { uint32_t aggregationWindowSize = 100; uint32_t eventBatchSize = 50; + uint32_t eventWaitWindow = 16; }; -DEPTHAI_SERIALIZE_EXT(PipelineEventAggregationProperties, aggregationWindowSize, eventBatchSize); +DEPTHAI_SERIALIZE_EXT(PipelineEventAggregationProperties, aggregationWindowSize, eventBatchSize, eventWaitWindow); } // namespace dai diff --git a/include/depthai/utility/CircularBuffer.hpp b/include/depthai/utility/CircularBuffer.hpp index c4cf3c181..c0428924e 100644 --- a/include/depthai/utility/CircularBuffer.hpp +++ b/include/depthai/utility/CircularBuffer.hpp @@ -11,12 +11,14 @@ class CircularBuffer { CircularBuffer(size_t size) : maxSize(size) { buffer.reserve(size); } - void add(T value) { + T& add(T value) { if(buffer.size() < maxSize) { buffer.push_back(value); + return buffer.back(); } else { buffer[index] = value; index = (index + 1) % maxSize; + return buffer[(index + maxSize - 1) % maxSize]; } } std::vector getBuffer() const { @@ -53,10 +55,114 @@ class CircularBuffer { return buffer.size(); } + // =========================================================== + // Forward iterator + // =========================================================== + class iterator { + public: + using iterator_category = std::forward_iterator_tag; + using value_type = T; + using difference_type = std::ptrdiff_t; + using pointer = T*; + using reference = T&; + + iterator(CircularBuffer* parent, size_t pos) : parent(parent), pos(pos) {} + + reference operator*() { + return parent->buffer[(parent->start() + pos) % parent->buffer.size()]; + } + pointer operator->() { + return &(**this); + } + + iterator& operator++() { + ++pos; + return *this; + } + iterator operator++(int) { + iterator tmp = *this; + ++(*this); + return tmp; + } + + bool 
operator==(const iterator& other) const { + return parent == other.parent && pos == other.pos; + } + bool operator!=(const iterator& other) const { + return !(*this == other); + } + + private: + CircularBuffer* parent; + size_t pos; + }; + + iterator begin() { + return iterator(this, 0); + } + iterator end() { + return iterator(this, buffer.size()); + } + + // =========================================================== + // Reverse iterator + // =========================================================== + class reverse_iterator { + public: + using iterator_category = std::bidirectional_iterator_tag; + using value_type = T; + using difference_type = std::ptrdiff_t; + using pointer = T*; + using reference = T&; + + reverse_iterator(CircularBuffer* parent, size_t pos) : parent(parent), pos(pos) {} + + reference operator*() { + // Map reverse position to logical order + size_t logicalIndex = (parent->start() + parent->buffer.size() - 1 - pos) % parent->buffer.size(); + return parent->buffer[logicalIndex]; + } + pointer operator->() { + return &(**this); + } + + reverse_iterator& operator++() { + ++pos; + return *this; + } + reverse_iterator operator++(int) { + reverse_iterator tmp = *this; + ++(*this); + return tmp; + } + + bool operator==(const reverse_iterator& other) const { + return parent == other.parent && pos == other.pos; + } + bool operator!=(const reverse_iterator& other) const { + return !(*this == other); + } + + private: + CircularBuffer* parent; + size_t pos; + }; + + reverse_iterator rbegin() { + return reverse_iterator(this, 0); + } + reverse_iterator rend() { + return reverse_iterator(this, buffer.size()); + } + private: std::vector buffer; size_t maxSize; size_t index = 0; + + size_t start() const { + return (buffer.size() < maxSize) ? 0 : index; + } }; } // namespace utility diff --git a/include/depthai/utility/PipelineEventDispatcher.hpp b/include/depthai/utility/PipelineEventDispatcher.hpp index 3590b0967..0908cead6 100644 --- a/include/depthai/utility/PipelineEventDispatcher.hpp +++ b/include/depthai/utility/PipelineEventDispatcher.hpp @@ -43,6 +43,7 @@ class PipelineEventDispatcher : public PipelineEventDispatcherInterface { void endOutputEvent(const std::string& source) override; void endCustomEvent(const std::string& source) override; void pingEvent(PipelineEvent::Type type, const std::string& source) override; + void pingTrackedEvent(PipelineEvent::Type type, const std::string& source, int64_t sequenceNum) override; void pingMainLoopEvent() override; void pingCustomEvent(const std::string& source) override; void pingInputEvent(const std::string& source, int32_t status, std::optional queueSize = std::nullopt) override; diff --git a/include/depthai/utility/PipelineEventDispatcherInterface.hpp b/include/depthai/utility/PipelineEventDispatcherInterface.hpp index e8f35751f..e50b35537 100644 --- a/include/depthai/utility/PipelineEventDispatcherInterface.hpp +++ b/include/depthai/utility/PipelineEventDispatcherInterface.hpp @@ -37,6 +37,8 @@ class PipelineEventDispatcherInterface { virtual void endOutputEvent(const std::string& source) = 0; virtual void endCustomEvent(const std::string& source) = 0; virtual void pingEvent(PipelineEvent::Type type, const std::string& source) = 0; + // The sequenceNum should be unique. 
Duration is calculated from sequenceNum - 1 to sequenceNum + virtual void pingTrackedEvent(PipelineEvent::Type type, const std::string& source, int64_t sequenceNum) = 0; virtual void pingMainLoopEvent() = 0; virtual void pingCustomEvent(const std::string& source) = 0; virtual void pingInputEvent(const std::string& source, int32_t status, std::optional queueSize = std::nullopt) = 0; diff --git a/src/pipeline/node/internal/PipelineEventAggregation.cpp b/src/pipeline/node/internal/PipelineEventAggregation.cpp index dde5fc920..31243a82c 100644 --- a/src/pipeline/node/internal/PipelineEventAggregation.cpp +++ b/src/pipeline/node/internal/PipelineEventAggregation.cpp @@ -24,10 +24,11 @@ class NodeEventAggregation { uint32_t windowSize; uint32_t eventBatchSize; + uint32_t eventWaitWindow; public: - NodeEventAggregation(uint32_t windowSize, uint32_t eventBatchSize, std::shared_ptr logger) - : logger(logger), windowSize(windowSize), eventBatchSize(eventBatchSize), eventsBuffer(windowSize) {} + NodeEventAggregation(uint32_t windowSize, uint32_t eventBatchSize, uint32_t eventWaitWindow, std::shared_ptr logger) + : logger(logger), windowSize(windowSize), eventBatchSize(eventBatchSize), eventWaitWindow(eventWaitWindow), eventsBuffer(windowSize) {} NodeState state; utility::CircularBuffer eventsBuffer; std::unordered_map>> inputTimingsBuffers; @@ -45,22 +46,33 @@ class NodeEventAggregation { std::unique_ptr> outputsSendFpsBuffer; std::unordered_map>> otherFpsBuffers; - std::unordered_map> ongoingInputEvents; - std::unordered_map> ongoingOutputEvents; - std::optional ongoingGetInputsEvent; - std::optional ongoingSendOutputsEvent; - std::optional ongoingMainLoopEvent; - std::unordered_map> ongoingOtherEvents; + std::unordered_map>>> ongoingInputEvents; + std::unordered_map>>> ongoingOutputEvents; + std::unique_ptr>> ongoingGetInputsEvents; + std::unique_ptr>> ongoingSendOutputsEvents; + std::unique_ptr>> ongoingMainLoopEvents; + std::unordered_map>>> ongoingOtherEvents; uint32_t count = 0; private: + inline std::optional* findOngoingEvent(uint32_t sequenceNum, utility::CircularBuffer>& buffer) { + for(auto rit = buffer.rbegin(); rit != buffer.rend(); ++rit) { + if(rit->has_value() && rit->value().sequenceNum == sequenceNum) { + return &(*rit); + } else if(rit->has_value() && rit->value().sequenceNum < sequenceNum) { + break; + } + } + return nullptr; + } + inline bool updateIntervalBuffers(PipelineEvent& event) { using namespace std::chrono; std::unique_ptr> emptyIntBuffer; std::unique_ptr> emptyTimeBuffer; - auto& ongoingEvent = [&]() -> std::optional& { + auto& ongoingEvents = [&]() -> std::unique_ptr>>& { switch(event.type) { case PipelineEvent::Type::LOOP: throw std::runtime_error("LOOP event should not be an interval"); @@ -71,11 +83,11 @@ class NodeEventAggregation { case PipelineEvent::Type::CUSTOM: return ongoingOtherEvents[event.source]; case PipelineEvent::Type::INPUT_BLOCK: - return ongoingGetInputsEvent; + return ongoingGetInputsEvents; case PipelineEvent::Type::OUTPUT_BLOCK: - return ongoingSendOutputsEvent; + return ongoingSendOutputsEvents; } - return ongoingMainLoopEvent; // To silence compiler warning + return ongoingMainLoopEvents; // To silence compiler warning }(); auto& timingsBuffer = [&]() -> std::unique_ptr>& { switch(event.type) { @@ -112,34 +124,28 @@ class NodeEventAggregation { return emptyTimeBuffer; // To silence compiler warning }(); + if(ongoingEvents == nullptr) ongoingEvents = std::make_unique>>(eventWaitWindow); if(timingsBuffer == nullptr) timingsBuffer = 
std::make_unique>(windowSize); if(fpsBuffer == nullptr) fpsBuffer = std::make_unique>(windowSize); - if(ongoingEvent.has_value() && ongoingEvent->sequenceNum == event.sequenceNum && event.interval == PipelineEvent::Interval::END) { + auto* ongoingEvent = findOngoingEvent(event.sequenceNum, *ongoingEvents); + + if(ongoingEvent && ongoingEvent->has_value() && event.interval == PipelineEvent::Interval::END) { // End event NodeState::DurationEvent durationEvent; - durationEvent.startEvent = *ongoingEvent; - durationEvent.durationUs = duration_cast(event.getTimestamp() - ongoingEvent->getTimestamp()).count(); + durationEvent.startEvent = ongoingEvent->value(); + durationEvent.durationUs = duration_cast(event.getTimestamp() - ongoingEvent->value().getTimestamp()).count(); eventsBuffer.add(durationEvent); - state.events = eventsBuffer.getBuffer(); timingsBuffer->add(durationEvent.durationUs); fpsBuffer->add({durationEvent.startEvent.getTimestamp(), durationEvent.startEvent.getSequenceNum()}); - ongoingEvent = std::nullopt; + *ongoingEvent = std::nullopt; return true; } else if(event.interval == PipelineEvent::Interval::START) { - if(ongoingEvent.has_value()) { - // TODO: add ability to wait for multiple events (nn hailo threaded processing time - events with custom ids for tracking) - // logger->warn("Ongoing event (seq {}) not finished before new one (seq {}) started. Event source: {}, node {}", - // ongoingEvent->sequenceNum, - // event.sequenceNum, - // ongoingEvent->source, - // event.nodeId); - } // Start event - ongoingEvent = event; + ongoingEvents->add(event); } return false; } @@ -149,10 +155,10 @@ class NodeEventAggregation { std::unique_ptr> emptyIntBuffer; std::unique_ptr> emptyTimeBuffer; - auto& ongoingEvent = [&]() -> std::optional& { + auto& ongoingEvents = [&]() -> std::unique_ptr>>& { switch(event.type) { case PipelineEvent::Type::LOOP: - return ongoingMainLoopEvent; + return ongoingMainLoopEvents; case PipelineEvent::Type::CUSTOM: return ongoingOtherEvents[event.source]; case PipelineEvent::Type::INPUT: @@ -161,7 +167,7 @@ class NodeEventAggregation { case PipelineEvent::Type::OUTPUT_BLOCK: throw std::runtime_error("INPUT and OUTPUT events should not be pings"); } - return ongoingMainLoopEvent; // To silence compiler warning + return ongoingMainLoopEvents; // To silence compiler warning }(); auto& timingsBuffer = [&]() -> std::unique_ptr>& { switch(event.type) { @@ -192,33 +198,29 @@ class NodeEventAggregation { return emptyTimeBuffer; // To silence compiler warning }(); + if(ongoingEvents == nullptr) ongoingEvents = std::make_unique>>(eventWaitWindow); if(timingsBuffer == nullptr) timingsBuffer = std::make_unique>(windowSize); if(fpsBuffer == nullptr) fpsBuffer = std::make_unique>(windowSize); - if(ongoingEvent.has_value() && ongoingEvent->sequenceNum == event.sequenceNum - 1) { + auto* ongoingEvent = findOngoingEvent(event.sequenceNum - 1, *ongoingEvents); + + if(ongoingEvent && ongoingEvent->has_value()) { // End event NodeState::DurationEvent durationEvent; - durationEvent.startEvent = *ongoingEvent; - durationEvent.durationUs = duration_cast(event.getTimestamp() - ongoingEvent->getTimestamp()).count(); + durationEvent.startEvent = ongoingEvent->value(); + durationEvent.durationUs = duration_cast(event.getTimestamp() - ongoingEvent->value().getTimestamp()).count(); eventsBuffer.add(durationEvent); - state.events = eventsBuffer.getBuffer(); timingsBuffer->add(durationEvent.durationUs); if(fpsBuffer) fpsBuffer->add({durationEvent.startEvent.getTimestamp(), 
durationEvent.startEvent.getSequenceNum()}); // Start event - ongoingEvent = event; + ongoingEvents->add(event); return true; - } else if(ongoingEvent.has_value()) { - // logger->warn("Ongoing main loop event (seq {}) not finished before new one (seq {}) started. Event source: {}, node {}", - // ongoingEvent->sequenceNum, - // event.sequenceNum, - // ongoingEvent->source, - // event.nodeId); } // Start event - ongoingEvent = event; + ongoingEvents->add(event); return false; } @@ -368,6 +370,7 @@ class PipelineEventHandler { Node::InputMap* inputs; uint32_t aggregationWindowSize; uint32_t eventBatchSize; + uint32_t eventWaitWindow; std::atomic running; @@ -378,8 +381,9 @@ class PipelineEventHandler { std::shared_mutex mutex; public: - PipelineEventHandler(Node::InputMap* inputs, uint32_t aggregationWindowSize, uint32_t eventBatchSize, std::shared_ptr logger) - : inputs(inputs), aggregationWindowSize(aggregationWindowSize), eventBatchSize(eventBatchSize), logger(logger) {} + PipelineEventHandler( + Node::InputMap* inputs, uint32_t aggregationWindowSize, uint32_t eventBatchSize, uint32_t eventWaitWindow, std::shared_ptr logger) + : inputs(inputs), aggregationWindowSize(aggregationWindowSize), eventBatchSize(eventBatchSize), eventWaitWindow(eventWaitWindow), logger(logger) {} void threadedRun() { while(running) { std::unordered_map> events; @@ -391,7 +395,7 @@ class PipelineEventHandler { for(auto& [k, event] : events) { if(event != nullptr) { if(nodeStates.find(event->nodeId) == nodeStates.end()) { - nodeStates.insert_or_assign(event->nodeId, NodeEventAggregation(aggregationWindowSize, eventBatchSize, logger)); + nodeStates.insert_or_assign(event->nodeId, NodeEventAggregation(aggregationWindowSize, eventBatchSize, eventWaitWindow, logger)); } { std::unique_lock lock(mutex); @@ -412,11 +416,12 @@ class PipelineEventHandler { running = false; if(thread.joinable()) thread.join(); } - bool getState(std::shared_ptr outState) { + bool getState(std::shared_ptr outState, bool sendEvents) { std::shared_lock lock(mutex); bool updated = false; for(auto& [nodeId, nodeState] : nodeStates) { outState->nodeStates[nodeId] = nodeState.state; + if(sendEvents) outState->nodeStates[nodeId].events = nodeState.eventsBuffer.getBuffer(); if(nodeState.count % eventBatchSize == 0) updated = true; } return updated; @@ -437,7 +442,7 @@ bool PipelineEventAggregation::runOnHost() const { void PipelineEventAggregation::run() { auto& logger = pimpl->logger; - PipelineEventHandler handler(&inputs, properties.aggregationWindowSize, properties.eventBatchSize, logger); + PipelineEventHandler handler(&inputs, properties.aggregationWindowSize, properties.eventBatchSize, properties.eventWaitWindow, logger); handler.run(); std::optional currentConfig; @@ -453,7 +458,16 @@ void PipelineEventAggregation::run() { } } if(gotConfig || (currentConfig.has_value() && currentConfig->repeat)) { - bool updated = handler.getState(outState); + bool sendEvents = false; + if(currentConfig.has_value()) { + for(const auto& nodeCfg : currentConfig->nodes) { + if(nodeCfg.events) { + sendEvents = true; + break; + } + } + } + bool updated = handler.getState(outState, sendEvents); outState->sequenceNum = sequenceNum++; outState->configSequenceNum = currentConfig.has_value() ? 
currentConfig->sequenceNum : 0;
             outState->setTimestamp(std::chrono::steady_clock::now());
@@ -494,7 +508,6 @@ void PipelineEventAggregation::run() {
                     }
                 }
             }
-            if(!nodeConfig->events) it->second.events.clear();
             ++it;
         }
     }
diff --git a/src/utility/PipelineEventDispatcher.cpp b/src/utility/PipelineEventDispatcher.cpp
index a67504518..4a16c4506 100644
--- a/src/utility/PipelineEventDispatcher.cpp
+++ b/src/utility/PipelineEventDispatcher.cpp
@@ -131,6 +131,26 @@ void PipelineEventDispatcher::pingEvent(PipelineEvent::Type type, const std::str
         out->send(std::make_shared(event.event));
     }
 }
+void PipelineEventDispatcher::pingTrackedEvent(PipelineEvent::Type type, const std::string& source, int64_t sequenceNum) {
+    if(!sendEvents) return;
+    checkNodeId();
+    if(blacklist(type, source)) return;
+
+    auto now = std::chrono::steady_clock::now();
+
+    auto event = std::make_shared();
+    event->setTimestamp(now);
+    event->tsDevice = event->ts;
+    event->sequenceNum = sequenceNum;
+    event->nodeId = nodeId;
+    event->interval = PipelineEvent::Interval::NONE;
+    event->type = type;
+    event->source = source;
+
+    if(out) {
+        out->send(event);
+    }
+}
 void PipelineEventDispatcher::pingMainLoopEvent() {
     pingEvent(PipelineEvent::Type::LOOP, "_mainLoop");
 }

From fd27d195beeb3c29fc31d8e3ed2a4e708469ed44 Mon Sep 17 00:00:00 2001
From: asahtik
Date: Fri, 10 Oct 2025 13:40:42 +0200
Subject: [PATCH 37/40] Add missing files

---
 include/depthai/pipeline/PipelineStateApi.hpp |  96 +++++++
 src/pipeline/PipelineStateApi.cpp             | 271 ++++++++++++++++++
 2 files changed, 367 insertions(+)
 create mode 100644 include/depthai/pipeline/PipelineStateApi.hpp
 create mode 100644 src/pipeline/PipelineStateApi.cpp

diff --git a/include/depthai/pipeline/PipelineStateApi.hpp b/include/depthai/pipeline/PipelineStateApi.hpp
new file mode 100644
index 000000000..90f0bd98e
--- /dev/null
+++ b/include/depthai/pipeline/PipelineStateApi.hpp
@@ -0,0 +1,96 @@
+#pragma once
+
+#include "Node.hpp"
+#include "depthai/pipeline/datatype/PipelineEventAggregationConfig.hpp"
+#include "depthai/pipeline/datatype/PipelineState.hpp"
+
+namespace dai {
+
+/**
+ * pipeline.getState().nodes({nodeId1}).summary() -> std::unordered_map;
+ * pipeline.getState().nodes({nodeId1}).detailed() -> std::unordered_map;
+ * pipeline.getState().nodes(nodeId1).detailed() -> NodeState;
+ * pipeline.getState().nodes({nodeId1}).outputs() -> std::unordered_map;
+ * pipeline.getState().nodes({nodeId1}).outputs({outputName1}) -> std::unordered_map;
+ * pipeline.getState().nodes({nodeId1}).outputs(outputName) -> OutputQueueState;
+ * pipeline.getState().nodes({nodeId1}).events();
+ * pipeline.getState().nodes({nodeId1}).inputs() -> std::unordered_map;
+ * pipeline.getState().nodes({nodeId1}).inputs({inputName1}) -> std::unordered_map;
+ * pipeline.getState().nodes({nodeId1}).inputs(inputName) -> InputQueueState;
+ * pipeline.getState().nodes({nodeId1}).otherTimings() -> std::unordered_map;
+ * pipeline.getState().nodes({nodeId1}).otherTimings({statName1}) -> std::unordered_map;
+ * pipeline.getState().nodes({nodeId1}).otherStats(statName) -> Timing;
+ */
+class NodesStateApi {
+    std::vector nodeIds;
+
+    std::shared_ptr pipelineStateOut;
+    std::shared_ptr pipelineStateRequest;
+
+   public:
+    explicit NodesStateApi(std::vector nodeIds, std::shared_ptr pipelineStateOut, std::shared_ptr pipelineStateRequest)
+        : nodeIds(std::move(nodeIds)), pipelineStateOut(pipelineStateOut), pipelineStateRequest(pipelineStateRequest) {}
+    PipelineState summary();
+    PipelineState detailed();
+    std::unordered_map> outputs();
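    // Usage sketch: assumes a built pipeline `p` exposing getState() as in the comment block
    // above; `p` and `camId` are placeholder names, not symbols defined by this header.
    //   auto all     = p.getState().nodes().summary();            // aggregate stats for every node
    //   auto one     = p.getState().nodes(camId).detailed();      // full NodeState for one node
    //   auto timings = p.getState().nodes(camId).otherTimings();  // stat name -> Timing
    // Each call sends one PipelineEventAggregationConfig request and blocks until the matching
    // PipelineState reply arrives (see the implementations in PipelineStateApi.cpp below).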
std::unordered_map> inputs(); + std::unordered_map> otherTimings(); +}; +class NodeStateApi { + Node::Id nodeId; + + std::shared_ptr pipelineStateOut; + std::shared_ptr pipelineStateRequest; + + public: + explicit NodeStateApi(Node::Id nodeId, std::shared_ptr pipelineStateOut, std::shared_ptr pipelineStateRequest) + : nodeId(nodeId), pipelineStateOut(pipelineStateOut), pipelineStateRequest(pipelineStateRequest) {} + NodeState summary() { + return NodesStateApi({nodeId}, pipelineStateOut, pipelineStateRequest).summary().nodeStates[nodeId]; + } + NodeState detailed() { + return NodesStateApi({nodeId}, pipelineStateOut, pipelineStateRequest).detailed().nodeStates[nodeId]; + } + std::unordered_map outputs() { + return NodesStateApi({nodeId}, pipelineStateOut, pipelineStateRequest).outputs()[nodeId]; + } + std::unordered_map inputs() { + return NodesStateApi({nodeId}, pipelineStateOut, pipelineStateRequest).inputs()[nodeId]; + } + std::unordered_map otherTimings() { + return NodesStateApi({nodeId}, pipelineStateOut, pipelineStateRequest).otherTimings()[nodeId]; + } + std::unordered_map outputs(const std::vector& outputNames); + NodeState::OutputQueueState outputs(const std::string& outputName); + std::vector events(); + std::unordered_map inputs(const std::vector& inputNames); + NodeState::InputQueueState inputs(const std::string& inputName); + std::unordered_map otherTimings(const std::vector& statNames); + NodeState::Timing otherStats(const std::string& statName); +}; +class PipelineStateApi { + std::shared_ptr pipelineStateOut; + std::shared_ptr pipelineStateRequest; + std::vector nodeIds; // empty means all nodes + + public: + PipelineStateApi(std::shared_ptr pipelineStateOut, + std::shared_ptr pipelineStateRequest, + const std::vector>& allNodes) + : pipelineStateOut(std::move(pipelineStateOut)), pipelineStateRequest(std::move(pipelineStateRequest)) { + for(const auto& n : allNodes) { + nodeIds.push_back(n->id); + } + } + NodesStateApi nodes() { + return NodesStateApi(nodeIds, pipelineStateOut, pipelineStateRequest); + } + NodesStateApi nodes(const std::vector& nodeIds) { + return NodesStateApi(nodeIds, pipelineStateOut, pipelineStateRequest); + } + NodeStateApi nodes(Node::Id nodeId) { + return NodeStateApi(nodeId, pipelineStateOut, pipelineStateRequest); + } +}; + +} // namespace dai diff --git a/src/pipeline/PipelineStateApi.cpp b/src/pipeline/PipelineStateApi.cpp new file mode 100644 index 000000000..35beb756d --- /dev/null +++ b/src/pipeline/PipelineStateApi.cpp @@ -0,0 +1,271 @@ +#include "depthai/pipeline/PipelineStateApi.hpp" + +#include "depthai/pipeline/InputQueue.hpp" + +namespace dai { + +PipelineState NodesStateApi::summary() { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + for(auto id : nodeIds) { + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = id; + nodeCfg.events = false; + nodeCfg.inputs = {}; // Do not send any + nodeCfg.outputs = {}; // Do not send any + nodeCfg.others = {}; // Do not send any + cfg.nodes.push_back(nodeCfg); + } + + pipelineStateRequest->send(std::make_shared(cfg)); + auto state = pipelineStateOut->get(); + if(!state) throw std::runtime_error("Failed to get PipelineState"); + return *state; +} +PipelineState NodesStateApi::detailed() { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + for(auto id : nodeIds) { + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = id; + nodeCfg.events = false; + 
cfg.nodes.push_back(nodeCfg); + } + + pipelineStateRequest->send(std::make_shared(cfg)); + auto state = pipelineStateOut->get(); + if(!state) throw std::runtime_error("Failed to get PipelineState"); + return *state; +} +std::unordered_map> NodesStateApi::outputs() { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + for(auto id : nodeIds) { + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = id; + nodeCfg.inputs = {}; // Do not send any + nodeCfg.others = {}; // Do not send any + nodeCfg.events = false; + cfg.nodes.push_back(nodeCfg); + } + + pipelineStateRequest->send(std::make_shared(cfg)); + auto state = pipelineStateOut->get(); + if(!state) throw std::runtime_error("Failed to get PipelineState"); + std::unordered_map> result; + for(auto& [nodeId, nodeState] : state->nodeStates) { + result[nodeId] = nodeState.outputStates; + } + return result; +} +std::unordered_map> NodesStateApi::inputs() { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + for(auto id : nodeIds) { + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = id; + nodeCfg.outputs = {}; // Do not send any + nodeCfg.others = {}; // Do not send any + nodeCfg.events = false; + cfg.nodes.push_back(nodeCfg); + } + + pipelineStateRequest->send(std::make_shared(cfg)); + auto state = pipelineStateOut->get(); + if(!state) throw std::runtime_error("Failed to get PipelineState"); + std::unordered_map> result; + for(auto& [nodeId, nodeState] : state->nodeStates) { + result[nodeId] = nodeState.inputStates; + } + return result; +} +std::unordered_map> NodesStateApi::otherTimings() { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + for(auto id : nodeIds) { + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = id; + nodeCfg.inputs = {}; // Do not send any + nodeCfg.outputs = {}; // Do not send any + nodeCfg.events = false; + cfg.nodes.push_back(nodeCfg); + } + + pipelineStateRequest->send(std::make_shared(cfg)); + auto state = pipelineStateOut->get(); + if(!state) throw std::runtime_error("Failed to get PipelineState"); + std::unordered_map> result; + for(auto& [nodeId, nodeState] : state->nodeStates) { + result[nodeId] = nodeState.otherTimings; + } + return result; +} + +std::unordered_map NodeStateApi::outputs(const std::vector& outputNames) { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = nodeId; + nodeCfg.outputs = outputNames; + nodeCfg.inputs = {}; // Do not send any + nodeCfg.others = {}; // Do not send any + nodeCfg.events = false; + cfg.nodes.push_back(nodeCfg); + + pipelineStateRequest->send(std::make_shared(cfg)); + auto state = pipelineStateOut->get(); + if(!state) throw std::runtime_error("Failed to get PipelineState"); + if(state->nodeStates.find(nodeId) == state->nodeStates.end()) { + throw std::runtime_error("Node ID " + std::to_string(nodeId) + " not found in PipelineState"); + } + std::unordered_map result; + for(const auto& outputName : outputNames) { + result[outputName] = state->nodeStates[nodeId].outputStates[outputName]; + } + return result; +} +NodeState::OutputQueueState NodeStateApi::outputs(const std::string& outputName) { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId 
= nodeId; + nodeCfg.outputs = {outputName}; + nodeCfg.inputs = {}; // Do not send any + nodeCfg.others = {}; // Do not send any + nodeCfg.events = false; + cfg.nodes.push_back(nodeCfg); + + pipelineStateRequest->send(std::make_shared(cfg)); + auto state = pipelineStateOut->get(); + if(!state) throw std::runtime_error("Failed to get PipelineState"); + if(state->nodeStates.find(nodeId) == state->nodeStates.end()) { + throw std::runtime_error("Node ID " + std::to_string(nodeId) + " not found in PipelineState"); + } + if(state->nodeStates[nodeId].outputStates.find(outputName) == state->nodeStates[nodeId].outputStates.end()) { + throw std::runtime_error("Output name " + outputName + " not found in NodeState for node ID " + std::to_string(nodeId)); + } + return state->nodeStates[nodeId].outputStates[outputName]; +} +std::vector NodeStateApi::events() { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = nodeId; + nodeCfg.outputs = {}; // Do not send any + nodeCfg.inputs = {}; // Do not send any + nodeCfg.others = {}; // Do not send any + nodeCfg.events = true; + cfg.nodes.push_back(nodeCfg); + + pipelineStateRequest->send(std::make_shared(cfg)); + auto state = pipelineStateOut->get(); + if(!state) throw std::runtime_error("Failed to get PipelineState"); + if(state->nodeStates.find(nodeId) == state->nodeStates.end()) { + throw std::runtime_error("Node ID " + std::to_string(nodeId) + " not found in PipelineState"); + } + return state->nodeStates[nodeId].events; +} +std::unordered_map NodeStateApi::inputs(const std::vector& inputNames) { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = nodeId; + nodeCfg.inputs = inputNames; + nodeCfg.outputs = {}; // Do not send any + nodeCfg.others = {}; // Do not send any + nodeCfg.events = false; + cfg.nodes.push_back(nodeCfg); + + pipelineStateRequest->send(std::make_shared(cfg)); + auto state = pipelineStateOut->get(); + if(!state) throw std::runtime_error("Failed to get PipelineState"); + if(state->nodeStates.find(nodeId) == state->nodeStates.end()) { + throw std::runtime_error("Node ID " + std::to_string(nodeId) + " not found in PipelineState"); + } + std::unordered_map result; + for(const auto& inputName : inputNames) { + result[inputName] = state->nodeStates[nodeId].inputStates[inputName]; + } + return result; +} +NodeState::InputQueueState NodeStateApi::inputs(const std::string& inputName) { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = nodeId; + nodeCfg.inputs = {inputName}; + nodeCfg.outputs = {}; // Do not send any + nodeCfg.others = {}; // Do not send any + nodeCfg.events = false; + cfg.nodes.push_back(nodeCfg); + + pipelineStateRequest->send(std::make_shared(cfg)); + auto state = pipelineStateOut->get(); + if(!state) throw std::runtime_error("Failed to get PipelineState"); + if(state->nodeStates.find(nodeId) == state->nodeStates.end()) { + throw std::runtime_error("Node ID " + std::to_string(nodeId) + " not found in PipelineState"); + } + if(state->nodeStates[nodeId].inputStates.find(inputName) == state->nodeStates[nodeId].inputStates.end()) { + throw std::runtime_error("Input name " + inputName + " not found in NodeState for node ID " + std::to_string(nodeId)); + } + return 
state->nodeStates[nodeId].inputStates[inputName]; +} +std::unordered_map NodeStateApi::otherTimings(const std::vector& statNames) { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = nodeId; + nodeCfg.others = statNames; + nodeCfg.outputs = {}; // Do not send any + nodeCfg.inputs = {}; // Do not send any + nodeCfg.events = false; + cfg.nodes.push_back(nodeCfg); + + pipelineStateRequest->send(std::make_shared(cfg)); + auto state = pipelineStateOut->get(); + if(!state) throw std::runtime_error("Failed to get PipelineState"); + if(state->nodeStates.find(nodeId) == state->nodeStates.end()) { + throw std::runtime_error("Node ID " + std::to_string(nodeId) + " not found in PipelineState"); + } + std::unordered_map result; + for(const auto& otherName : statNames) { + result[otherName] = state->nodeStates[nodeId].otherTimings[otherName]; + } + return result; +} +NodeState::Timing NodeStateApi::otherStats(const std::string& statName) { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = nodeId; + nodeCfg.others = {statName}; + nodeCfg.outputs = {}; // Do not send any + nodeCfg.inputs = {}; // Do not send any + nodeCfg.events = false; + cfg.nodes.push_back(nodeCfg); + + pipelineStateRequest->send(std::make_shared(cfg)); + auto state = pipelineStateOut->get(); + if(!state) throw std::runtime_error("Failed to get PipelineState"); + if(state->nodeStates.find(nodeId) == state->nodeStates.end()) { + throw std::runtime_error("Node ID " + std::to_string(nodeId) + " not found in PipelineState"); + } + if(state->nodeStates[nodeId].otherTimings.find(statName) == state->nodeStates[nodeId].otherTimings.end()) { + throw std::runtime_error("Stat name " + statName + " not found in NodeState for node ID " + std::to_string(nodeId)); + } + return state->nodeStates[nodeId].otherTimings[statName]; +} + +} // namespace dai From 66e4fcdee5777edb00788927490577bb363a908c Mon Sep 17 00:00:00 2001 From: asahtik Date: Fri, 10 Oct 2025 14:52:25 +0200 Subject: [PATCH 38/40] Add mutex lock to pipeline event dispatcher --- .../depthai/utility/PipelineEventDispatcher.hpp | 2 ++ src/utility/PipelineEventDispatcher.cpp | 14 ++++++++++---- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/include/depthai/utility/PipelineEventDispatcher.hpp b/include/depthai/utility/PipelineEventDispatcher.hpp index 0908cead6..6a0595f6a 100644 --- a/include/depthai/utility/PipelineEventDispatcher.hpp +++ b/include/depthai/utility/PipelineEventDispatcher.hpp @@ -25,6 +25,8 @@ class PipelineEventDispatcher : public PipelineEventDispatcherInterface { void checkNodeId(); uint32_t sequenceNum = 0; + + std::mutex mutex; public: PipelineEventDispatcher() = delete; diff --git a/src/utility/PipelineEventDispatcher.cpp b/src/utility/PipelineEventDispatcher.cpp index 4a16c4506..e0a9e79bf 100644 --- a/src/utility/PipelineEventDispatcher.cpp +++ b/src/utility/PipelineEventDispatcher.cpp @@ -41,11 +41,12 @@ void PipelineEventDispatcher::setNodeId(int64_t id) { nodeId = id; } void PipelineEventDispatcher::startEvent(PipelineEvent::Type type, const std::string& source, std::optional queueSize) { - // TODO add mutex if(!sendEvents) return; checkNodeId(); if(blacklist(type, source)) return; + std::lock_guard lock(mutex); + auto& event = events[makeKey(type, source)]; 
event.event.setTimestamp(std::chrono::steady_clock::now()); event.event.tsDevice = event.event.ts; @@ -71,11 +72,12 @@ void PipelineEventDispatcher::startCustomEvent(const std::string& source) { startEvent(PipelineEvent::Type::CUSTOM, source, std::nullopt); } void PipelineEventDispatcher::endEvent(PipelineEvent::Type type, const std::string& source, std::optional queueSize) { - // TODO add mutex if(!sendEvents) return; checkNodeId(); if(blacklist(type, source)) return; + std::lock_guard lock(mutex); + auto now = std::chrono::steady_clock::now(); auto& event = events[makeKey(type, source)]; @@ -108,11 +110,12 @@ void PipelineEventDispatcher::endCustomEvent(const std::string& source) { endEvent(PipelineEvent::Type::CUSTOM, source, std::nullopt); } void PipelineEventDispatcher::pingEvent(PipelineEvent::Type type, const std::string& source) { - // TODO add mutex if(!sendEvents) return; checkNodeId(); if(blacklist(type, source)) return; + std::lock_guard lock(mutex); + auto now = std::chrono::steady_clock::now(); auto& event = events[makeKey(type, source)]; @@ -136,6 +139,8 @@ void PipelineEventDispatcher::pingTrackedEvent(PipelineEvent::Type type, const s checkNodeId(); if(blacklist(type, source)) return; + std::lock_guard lock(mutex); + auto now = std::chrono::steady_clock::now(); auto event = std::make_shared(); @@ -158,11 +163,12 @@ void PipelineEventDispatcher::pingCustomEvent(const std::string& source) { pingEvent(PipelineEvent::Type::CUSTOM, source); } void PipelineEventDispatcher::pingInputEvent(const std::string& source, int32_t status, std::optional queueSize) { - // TODO add mutex if(!sendEvents) return; checkNodeId(); if(blacklist(PipelineEvent::Type::INPUT, source)) return; + std::lock_guard lock(mutex); + auto now = std::chrono::steady_clock::now(); auto& event = events[makeKey(PipelineEvent::Type::INPUT, source)]; From 75592f7d03a9bac5b49cb4e7650a19fcfbc7ad30 Mon Sep 17 00:00:00 2001 From: asahtik Date: Mon, 13 Oct 2025 16:06:32 +0200 Subject: [PATCH 39/40] Fix docstrings errors --- .../src/pipeline/datatype/PipelineEventBindings.cpp | 6 +++--- .../src/pipeline/datatype/PipelineStateBindings.cpp | 8 ++++---- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/bindings/python/src/pipeline/datatype/PipelineEventBindings.cpp b/bindings/python/src/pipeline/datatype/PipelineEventBindings.cpp index f3437db59..23435044f 100644 --- a/bindings/python/src/pipeline/datatype/PipelineEventBindings.cpp +++ b/bindings/python/src/pipeline/datatype/PipelineEventBindings.cpp @@ -53,7 +53,7 @@ void bind_pipelineevent(pybind11::module& m, void* pCallstack) { .def("getTimestamp", &PipelineEvent::Buffer::getTimestamp, DOC(dai, Buffer, getTimestamp)) .def("getTimestampDevice", &PipelineEvent::Buffer::getTimestampDevice, DOC(dai, Buffer, getTimestampDevice)) .def("getSequenceNum", &PipelineEvent::Buffer::getSequenceNum, DOC(dai, Buffer, getSequenceNum)) - .def("setTimestamp", &PipelineEvent::setTimestamp, DOC(dai, PipelineEvent, setTimestamp)) - .def("setTimestampDevice", &PipelineEvent::setTimestampDevice, DOC(dai, PipelineEvent, setTimestampDevice)) - .def("setSequenceNum", &PipelineEvent::setSequenceNum, DOC(dai, PipelineEvent, setSequenceNum)); + .def("setTimestamp", &PipelineEvent::setTimestamp, DOC(dai, Buffer, setTimestamp)) + .def("setTimestampDevice", &PipelineEvent::setTimestampDevice, DOC(dai, Buffer, setTimestampDevice)) + .def("setSequenceNum", &PipelineEvent::setSequenceNum, DOC(dai, Buffer, setSequenceNum)); } diff --git 
a/bindings/python/src/pipeline/datatype/PipelineStateBindings.cpp b/bindings/python/src/pipeline/datatype/PipelineStateBindings.cpp index 21e560891..bb42da24b 100644 --- a/bindings/python/src/pipeline/datatype/PipelineStateBindings.cpp +++ b/bindings/python/src/pipeline/datatype/PipelineStateBindings.cpp @@ -52,7 +52,7 @@ void bind_pipelinestate(pybind11::module& m, void* pCallstack) { durationEvent.def(py::init<>()) .def("__repr__", &NodeState::DurationEvent::str) .def_readwrite("startEvent", &NodeState::DurationEvent::startEvent, DOC(dai, NodeState, DurationEvent, startEvent)) - .def_readwrite("durationUs", &NodeState::DurationEvent::durationUs, DOC(dai, NodeState, TimingStats, durationUs)); + .def_readwrite("durationUs", &NodeState::DurationEvent::durationUs, DOC(dai, NodeState, DurationEvent, durationUs)); nodeStateTimingStats.def(py::init<>()) .def("__repr__", &NodeState::TimingStats::str) @@ -106,7 +106,7 @@ void bind_pipelinestate(pybind11::module& m, void* pCallstack) { .def("getTimestamp", &PipelineState::Buffer::getTimestamp, DOC(dai, Buffer, getTimestamp)) .def("getTimestampDevice", &PipelineState::Buffer::getTimestampDevice, DOC(dai, Buffer, getTimestampDevice)) .def("getSequenceNum", &PipelineState::Buffer::getSequenceNum, DOC(dai, Buffer, getSequenceNum)) - .def("setTimestamp", &PipelineState::setTimestamp, DOC(dai, PipelineState, setTimestamp)) - .def("setTimestampDevice", &PipelineState::setTimestampDevice, DOC(dai, PipelineState, setTimestampDevice)) - .def("setSequenceNum", &PipelineState::setSequenceNum, DOC(dai, PipelineState, setSequenceNum)); + .def("setTimestamp", &PipelineState::setTimestamp, DOC(dai, Buffer, setTimestamp)) + .def("setTimestampDevice", &PipelineState::setTimestampDevice, DOC(dai, Buffer, setTimestampDevice)) + .def("setSequenceNum", &PipelineState::setSequenceNum, DOC(dai, Buffer, setSequenceNum)); } From 4cef1e6a7231ee19a0eae6835348eb022f8da84f Mon Sep 17 00:00:00 2001 From: asahtik Date: Thu, 16 Oct 2025 16:29:22 +0200 Subject: [PATCH 40/40] Disable pipeline events --- src/pipeline/Pipeline.cpp | 90 ++++++++++++++++++++----------------- src/utility/Environment.hpp | 7 +-- 2 files changed, 53 insertions(+), 44 deletions(-) diff --git a/src/pipeline/Pipeline.cpp b/src/pipeline/Pipeline.cpp index 9ed64bdee..b3e1157e2 100644 --- a/src/pipeline/Pipeline.cpp +++ b/src/pipeline/Pipeline.cpp @@ -659,51 +659,59 @@ void PipelineImpl::build() { } // Create pipeline event aggregator node and link - // Check if any nodes are on host or device - bool hasHostNodes = false; - bool hasDeviceNodes = false; - for(const auto& node : getAllNodes()) { - if(std::string(node->getName()) == std::string("NodeGroup") || std::string(node->getName()) == std::string("DeviceNodeGroup")) continue; - - if(node->runOnHost()) { - hasHostNodes = true; - } else { - hasDeviceNodes = true; + bool enablePipelineDebugging = utility::getEnvAs("DEPTHAI_PIPELINE_DEBUGGING", false); + if(enablePipelineDebugging) { + // Check if any nodes are on host or device + bool hasHostNodes = false; + bool hasDeviceNodes = false; + for(const auto& node : getAllNodes()) { + if(std::string(node->getName()) == std::string("NodeGroup") || std::string(node->getName()) == std::string("DeviceNodeGroup")) continue; + + if(node->runOnHost()) { + hasHostNodes = true; + } else { + hasDeviceNodes = true; + } } - } - std::shared_ptr hostEventAgg = nullptr; - std::shared_ptr deviceEventAgg = nullptr; - if(hasHostNodes) { - hostEventAgg = parent.create(); - hostEventAgg->setRunOnHost(true); - } - if(hasDeviceNodes) { 
- deviceEventAgg = parent.create(); - deviceEventAgg->setRunOnHost(false); - } - for(auto& node : getAllNodes()) { - if(std::string(node->getName()) == std::string("NodeGroup") || std::string(node->getName()) == std::string("DeviceNodeGroup")) continue; - - auto threadedNode = std::dynamic_pointer_cast(node); - if(threadedNode) { - if(node->runOnHost() && hostEventAgg && node->id != hostEventAgg->id) { - threadedNode->pipelineEventOutput.link(hostEventAgg->inputs[fmt::format("{} - {}", node->getName(), node->id)]); - } else if(!node->runOnHost() && deviceEventAgg && node->id != deviceEventAgg->id) { - threadedNode->pipelineEventOutput.link(deviceEventAgg->inputs[fmt::format("{} - {}", node->getName(), node->id)]); + std::shared_ptr hostEventAgg = nullptr; + std::shared_ptr deviceEventAgg = nullptr; + if(hasHostNodes) { + hostEventAgg = parent.create(); + hostEventAgg->setRunOnHost(true); + } + if(hasDeviceNodes) { + deviceEventAgg = parent.create(); + deviceEventAgg->setRunOnHost(false); + } + for(auto& node : getAllNodes()) { + if(std::string(node->getName()) == std::string("NodeGroup") || std::string(node->getName()) == std::string("DeviceNodeGroup")) continue; + + auto threadedNode = std::dynamic_pointer_cast(node); + if(threadedNode) { + if(node->runOnHost() && hostEventAgg && node->id != hostEventAgg->id) { + threadedNode->pipelineEventOutput.link(hostEventAgg->inputs[fmt::format("{} - {}", node->getName(), node->id)]); + } else if(!node->runOnHost() && deviceEventAgg && node->id != deviceEventAgg->id) { + threadedNode->pipelineEventOutput.link(deviceEventAgg->inputs[fmt::format("{} - {}", node->getName(), node->id)]); + } } } + auto stateMerge = parent.create()->build(hasDeviceNodes, hasHostNodes); + if(deviceEventAgg) { + deviceEventAgg->out.link(stateMerge->inputDevice); + stateMerge->outRequest.link(deviceEventAgg->request); + } + if(hostEventAgg) { + hostEventAgg->out.link(stateMerge->inputHost); + stateMerge->outRequest.link(hostEventAgg->request); + } + pipelineStateOut = stateMerge->out.createOutputQueue(1, false); + pipelineStateRequest = stateMerge->request.createInputQueue(); } - auto stateMerge = parent.create()->build(hasDeviceNodes, hasHostNodes); - if(deviceEventAgg) { - deviceEventAgg->out.link(stateMerge->inputDevice); - stateMerge->outRequest.link(deviceEventAgg->request); - } - if(hostEventAgg) { - hostEventAgg->out.link(stateMerge->inputHost); - stateMerge->outRequest.link(hostEventAgg->request); - } - pipelineStateOut = stateMerge->out.createOutputQueue(1, false); - pipelineStateRequest = stateMerge->request.createInputQueue(); + } + + if(std::find_if(getAllNodes().begin(), getAllNodes().end(), [](const std::shared_ptr& n) { return strcmp(n->getName(), "PipelineEventAggregation") == 0; }) + == getAllNodes().end()) { + for(auto& node : getAllNodes()) node->pipelineEventDispatcher->sendEvents = false; } isBuild = true; diff --git a/src/utility/Environment.hpp b/src/utility/Environment.hpp index 07f56ec5d..4a0e90099 100644 --- a/src/utility/Environment.hpp +++ b/src/utility/Environment.hpp @@ -97,14 +97,15 @@ T getEnvAs(const std::string& var, T defaultValue, spdlog::logger& logger, bool // bool else if constexpr(std::is_same_v) { - if(value == "1" || value == "true" || value == "TRUE" || value == "True") { + std::transform(value.begin(), value.end(), value.begin(), ::tolower); + if(value == "1" || value == "true" || value == "t" || value == "on") { returnValue = true; - } else if(value == "0" || value == "false" || value == "FALSE" || value == "False") { + } else 
if(value == "0" || value == "false" || value == "f" || value == "off") {
                 returnValue = false;
             } else {
                 std::ostringstream message;
                 message << "Failed to convert environment variable " << var << " from '" << value << "' to type " << typeid(T).name();
-                message << ". Possible values are '1', 'true', 'TRUE', 'True', '0', 'false', 'FALSE', 'False'";
+                message << ". Possible values are '1', 'true', 'TRUE', 'True', 'T', 'ON', '0', 'false', 'FALSE', 'False', 'F', 'OFF'";
                 throw std::runtime_error(message.str());
             }
         }
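
Taken together, the final patch makes pipeline event collection opt-in: the aggregation nodes, the state merge node, and the pipelineStateOut/pipelineStateRequest queues are only wired up when DEPTHAI_PIPELINE_DEBUGGING parses as true, and every node's event dispatcher is muted otherwise. A minimal end-to-end sketch, assuming the getState() accessor introduced in patch 37 and POSIX setenv(); the Camera node and the loop body are placeholders, not code taken from these patches:

    #include <cstdlib>

    #include "depthai/depthai.hpp"

    int main() {
        // Opt in before the pipeline is built; any of "1"/"true"/"t"/"on" is accepted, case-insensitively.
        setenv("DEPTHAI_PIPELINE_DEBUGGING", "1", 1);

        dai::Pipeline pipeline;
        auto cam = pipeline.create<dai::node::Camera>();  // placeholder minimal graph
        pipeline.start();

        // Request an aggregated summary; this blocks until the next PipelineState reply.
        auto summary = pipeline.getState().nodes().summary();
        for(const auto& [nodeId, nodeState] : summary.nodeStates) {
            // Inspect e.g. nodeState.inputStates, nodeState.outputStates or nodeState.otherTimings here.
            (void)nodeId;
            (void)nodeState;
        }

        pipeline.stop();
        return 0;
    }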