diff --git a/.vscode/c_cpp_properties.json b/.vscode/c_cpp_properties.json new file mode 100644 index 0000000..d3b2256 --- /dev/null +++ b/.vscode/c_cpp_properties.json @@ -0,0 +1,21 @@ +{ + "configurations": [ + { + "name": "Win32", + "includePath": [ + "${workspaceFolder}/**" + ], + "defines": [ + "_DEBUG", + "UNICODE", + "_UNICODE" + ], + "windowsSdkVersion": "10.0.26100.0", + "compilerPath": "cl.exe", + "cStandard": "c17", + "cppStandard": "c++23", + "intelliSenseMode": "windows-msvc-x64" + } + ], + "version": 4 +} \ No newline at end of file diff --git a/.vscode/settings.json b/.vscode/settings.json index 5105813..947aa36 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -13,5 +13,13 @@ "Timespec", "Xeon", "Xunit" - ] + ], + "files.associations": { + "chrono": "cpp", + "xlocale": "cpp", + "xstring": "cpp", + "cmath": "cpp", + "system_error": "cpp", + "span": "cpp" + } } \ No newline at end of file diff --git a/.vscode/tasks.json b/.vscode/tasks.json index 288fbb3..b05802e 100644 --- a/.vscode/tasks.json +++ b/.vscode/tasks.json @@ -4,11 +4,13 @@ { "label": "build", "type": "shell", - "command": "dotnet", + "command": "msbuild", "args": [ - "build", - "${workspaceFolder}/src" + "/t:rebuild" ], + "options": { + "cwd": "${workspaceFolder}/src" + }, "group": { "kind": "build", "isDefault": true @@ -21,5 +23,25 @@ "panel": "shared" } }, + { + "label": "nativetests", + "type": "shell", + "dependsOn": "build", + "command": "${workspaceFolder}\\src\\x64\\Debug\\Interprocess.Native.Static.Tests.exe", + "problemMatcher": [], + "group": { + "kind": "test", + } + }, + { + "label": "alltests", + "type": "shell", + "command": "${workspaceFolder}\\test-comprehensive.ps1", + "problemMatcher": [], + "group": { + "kind": "test", + "isDefault": true + } + } ] } \ No newline at end of file diff --git a/src/Interprocess.Native.Static.Tests/CircularBufferTests.cpp b/src/Interprocess.Native.Static.Tests/CircularBufferTests.cpp new file mode 100644 index 0000000..5c98259 --- /dev/null +++ b/src/Interprocess.Native.Static.Tests/CircularBufferTests.cpp @@ -0,0 +1,587 @@ +//****************************************************************************** +// CircularBuffer Comprehensive Test Suite +//****************************************************************************** +// +// Purpose: Comprehensive testing of the CircularBuffer class to protect against +// regressions, particularly the template span bug that was fixed. +// +// Categories: +// - Basic Functionality: Constructor, pointer arithmetic, offset wrapping +// - Write Operations: Span writes, template writes, wrapping, edge cases +// - Read Operations: Basic reads, wrapping, truncation, large offsets +// - Clear Operations: Basic clear, wrapping, zero-length, full buffer +// - Round-Trip Tests: Write/read cycles with wrapping +// - Edge Cases: Single-byte buffer, large offsets, full capacity, overwrites +// - Regression Tests: Span template fix, offset handling consistency +// - Data-Driven Tests: Parameterized tests validating multiple scenarios +// +// Key Regression Protections: +// 1. SpanNotWrittenAsObject - CRITICAL test ensuring std::span is written +// element-by-element, not as an object. Validates the requires constraint. +// 2. OffsetHandlingConsistency - Ensures offset wrapping is correct +// 3. All wrapping tests - Protect against buffer overflow and wrap-around bugs +// +// Usage: +// Run all: --gtest_filter="CircularBufferTest.*" +// Run specific: --gtest_filter="CircularBufferTest.SpanNotWrittenAsObject" +// +//****************************************************************************** + +#include "pch.h" +#include "CircularBuffer.h" +#include "QueueHeader.h" +#include +#include + +using namespace Cloudtoid::Interprocess; + +class CircularBufferTest : public ::testing::Test +{ +protected: + void SetUp() override + { + // Allocate test buffer + testBuffer = new unsigned char[TEST_BUFFER_SIZE]; + std::memset(testBuffer, 0, TEST_BUFFER_SIZE); + } + + void TearDown() override + { + delete[] testBuffer; + } + + static constexpr size_t TEST_BUFFER_SIZE = 1024; + unsigned char* testBuffer = nullptr; +}; + +// ===== Basic Functionality Tests ===== + +TEST_F(CircularBufferTest, ConstructorSetsCapacity) +{ + CircularBuffer buffer(testBuffer, TEST_BUFFER_SIZE); + EXPECT_EQ(buffer.GetCapacity(), TEST_BUFFER_SIZE); +} + +TEST_F(CircularBufferTest, GetPointerReturnsCorrectLocation) +{ + CircularBuffer buffer(testBuffer, 10); + + // Within bounds + EXPECT_EQ(buffer.GetPointer(0), testBuffer); + EXPECT_EQ(buffer.GetPointer(5), testBuffer + 5); + EXPECT_EQ(buffer.GetPointer(9), testBuffer + 9); + + // Wrapping + EXPECT_EQ(buffer.GetPointer(10), testBuffer); + EXPECT_EQ(buffer.GetPointer(15), testBuffer + 5); + EXPECT_EQ(buffer.GetPointer(20), testBuffer); +} + +TEST_F(CircularBufferTest, AdjustedOffsetWrapsCorrectly) +{ + CircularBuffer buffer(testBuffer, 10); + + EXPECT_EQ(buffer.AdjustedOffset(0), 0); + EXPECT_EQ(buffer.AdjustedOffset(5), 5); + EXPECT_EQ(buffer.AdjustedOffset(10), 0); + EXPECT_EQ(buffer.AdjustedOffset(15), 5); + EXPECT_EQ(buffer.AdjustedOffset(100), 0); +} + +// ===== Write Tests ===== + +TEST_F(CircularBufferTest, WriteSpanBasic) +{ + CircularBuffer buffer(testBuffer, 10); + + unsigned char data[] = {1, 2, 3, 4, 5}; + std::span span(data, 5); + + buffer.Write(span, 0); + + for (size_t i = 0; i < 5; ++i) + { + EXPECT_EQ(testBuffer[i], data[i]); + } +} + +TEST_F(CircularBufferTest, WriteSpanWrapping) +{ + CircularBuffer buffer(testBuffer, 10); + + unsigned char data[] = {1, 2, 3, 4, 5}; + std::span span(data, 5); + + // Write starting at offset 8, should wrap to beginning + buffer.Write(span, 8); + + EXPECT_EQ(testBuffer[8], 1); + EXPECT_EQ(testBuffer[9], 2); + EXPECT_EQ(testBuffer[0], 3); + EXPECT_EQ(testBuffer[1], 4); + EXPECT_EQ(testBuffer[2], 5); +} + +TEST_F(CircularBufferTest, WriteSpanAtExactBoundary) +{ + CircularBuffer buffer(testBuffer, 10); + + unsigned char data[] = {1, 2, 3, 4, 5}; + std::span span(data, 5); + + // Write starting at offset 10 (exactly at boundary) + buffer.Write(span, 10); + + for (size_t i = 0; i < 5; ++i) + { + EXPECT_EQ(testBuffer[i], data[i]); + } +} + +TEST_F(CircularBufferTest, WriteStructBasic) +{ + struct TestStruct + { + int a; + double b; + char c; + }; + + CircularBuffer buffer(testBuffer, sizeof(TestStruct) * 2); + + TestStruct original = {42, 3.14159, 'X'}; + buffer.Write(original, 0); + + TestStruct* read = reinterpret_cast(testBuffer); + EXPECT_EQ(read->a, 42); + EXPECT_DOUBLE_EQ(read->b, 3.14159); + EXPECT_EQ(read->c, 'X'); +} + +TEST_F(CircularBufferTest, WriteStructWrapping) +{ + struct TestStruct + { + unsigned char data[8]; + }; + + CircularBuffer buffer(testBuffer, 10); + + TestStruct original; + for (int i = 0; i < 8; ++i) + original.data[i] = static_cast(i + 1); + + // Write starting at offset 5, should wrap + buffer.Write(original, 5); + + // Check wrapped data + for (int i = 0; i < 5; ++i) + EXPECT_EQ(testBuffer[5 + i], i + 1); + + for (int i = 0; i < 3; ++i) + EXPECT_EQ(testBuffer[i], i + 6); +} + +// ===== Read Tests ===== + +TEST_F(CircularBufferTest, ReadBasic) +{ + CircularBuffer buffer(testBuffer, 10); + + // Initialize buffer with known data + for (size_t i = 0; i < 10; ++i) + testBuffer[i] = static_cast(i + 1); + + unsigned char readBuffer[5]; + std::span span(readBuffer, 5); + + auto result = buffer.Read(0, 5, span); + + EXPECT_EQ(result.size(), 5); + for (size_t i = 0; i < 5; ++i) + EXPECT_EQ(result[i], i + 1); +} + +TEST_F(CircularBufferTest, ReadWrapping) +{ + CircularBuffer buffer(testBuffer, 10); + + // Initialize buffer + for (size_t i = 0; i < 10; ++i) + testBuffer[i] = static_cast(i); + + unsigned char readBuffer[5]; + std::span span(readBuffer, 5); + + // Read starting at offset 8 + auto result = buffer.Read(8, 5, span); + + EXPECT_EQ(result.size(), 5); + EXPECT_EQ(result[0], 8); + EXPECT_EQ(result[1], 9); + EXPECT_EQ(result[2], 0); + EXPECT_EQ(result[3], 1); + EXPECT_EQ(result[4], 2); +} + +TEST_F(CircularBufferTest, ReadTruncatesToBufferSize) +{ + CircularBuffer buffer(testBuffer, 10); + + for (size_t i = 0; i < 10; ++i) + testBuffer[i] = static_cast(i); + + unsigned char readBuffer[3]; + std::span span(readBuffer, 3); + + // Request 5 bytes but buffer only has room for 3 + auto result = buffer.Read(0, 5, span); + + EXPECT_EQ(result.size(), 3); + EXPECT_EQ(result[0], 0); + EXPECT_EQ(result[1], 1); + EXPECT_EQ(result[2], 2); +} + +TEST_F(CircularBufferTest, ReadAtOffsetBeyondCapacity) +{ + CircularBuffer buffer(testBuffer, 10); + + for (size_t i = 0; i < 10; ++i) + testBuffer[i] = static_cast(i); + + unsigned char readBuffer[3]; + std::span span(readBuffer, 3); + + // Read at offset 15 (wraps to offset 5) + auto result = buffer.Read(15, 3, span); + + EXPECT_EQ(result.size(), 3); + EXPECT_EQ(result[0], 5); + EXPECT_EQ(result[1], 6); + EXPECT_EQ(result[2], 7); +} + +// ===== Clear Tests ===== + +TEST_F(CircularBufferTest, ClearBasic) +{ + CircularBuffer buffer(testBuffer, 10); + + // Fill with non-zero data + std::fill_n(testBuffer, 10, 0xFF); + + buffer.Clear(0, 5); + + for (size_t i = 0; i < 5; ++i) + EXPECT_EQ(testBuffer[i], 0); + + for (size_t i = 5; i < 10; ++i) + EXPECT_EQ(testBuffer[i], 0xFF); +} + +TEST_F(CircularBufferTest, ClearWrapping) +{ + CircularBuffer buffer(testBuffer, 10); + + std::fill_n(testBuffer, 10, 0xFF); + + // Clear 5 bytes starting at offset 8 + buffer.Clear(8, 5); + + EXPECT_EQ(testBuffer[8], 0); + EXPECT_EQ(testBuffer[9], 0); + EXPECT_EQ(testBuffer[0], 0); + EXPECT_EQ(testBuffer[1], 0); + EXPECT_EQ(testBuffer[2], 0); + + for (size_t i = 3; i < 8; ++i) + EXPECT_EQ(testBuffer[i], 0xFF); +} + +TEST_F(CircularBufferTest, ClearEntireBuffer) +{ + CircularBuffer buffer(testBuffer, 10); + + std::fill_n(testBuffer, 10, 0xFF); + + buffer.Clear(0, 10); + + for (size_t i = 0; i < 10; ++i) + EXPECT_EQ(testBuffer[i], 0); +} + +// ===== Round-trip Tests ===== + +TEST_F(CircularBufferTest, WriteAndReadRoundTrip) +{ + CircularBuffer buffer(testBuffer, 20); + + unsigned char writeData[] = {10, 20, 30, 40, 50}; + std::span writeSpan(writeData, 5); + + buffer.Write(writeSpan, 0); + + unsigned char readBuffer[5]; + std::span readSpan(readBuffer, 5); + + auto result = buffer.Read(0, 5, readSpan); + + EXPECT_EQ(result.size(), 5); + for (size_t i = 0; i < 5; ++i) + EXPECT_EQ(result[i], writeData[i]); +} + +TEST_F(CircularBufferTest, WriteAndReadRoundTripWrapping) +{ + CircularBuffer buffer(testBuffer, 10); + + unsigned char writeData[] = {1, 2, 3, 4, 5, 6, 7}; + std::span writeSpan(writeData, 7); + + // Write starting at offset 7 (will wrap) + buffer.Write(writeSpan, 7); + + unsigned char readBuffer[7]; + std::span readSpan(readBuffer, 7); + + auto result = buffer.Read(7, 7, readSpan); + + EXPECT_EQ(result.size(), 7); + for (size_t i = 0; i < 7; ++i) + EXPECT_EQ(result[i], writeData[i]); +} + +TEST_F(CircularBufferTest, MultipleSequentialWrites) +{ + CircularBuffer buffer(testBuffer, 100); + + unsigned char data1[] = {1, 2, 3}; + unsigned char data2[] = {4, 5, 6}; + unsigned char data3[] = {7, 8, 9}; + + buffer.Write(std::span(data1, 3), 0); + buffer.Write(std::span(data2, 3), 3); + buffer.Write(std::span(data3, 3), 6); + + unsigned char readBuffer[9]; + std::span readSpan(readBuffer, 9); + + auto result = buffer.Read(0, 9, readSpan); + + EXPECT_EQ(result.size(), 9); + for (size_t i = 0; i < 9; ++i) + EXPECT_EQ(result[i], i + 1); +} + +// ===== Edge Cases and Stress Tests ===== + +TEST_F(CircularBufferTest, SingleByteBuffer) +{ + unsigned char singleByte = 0; + CircularBuffer buffer(&singleByte, 1); + + unsigned char writeData[] = {42}; + buffer.Write(std::span(writeData, 1), 0); + + unsigned char readBuffer[1]; + auto result = buffer.Read(0, 1, std::span(readBuffer, 1)); + + EXPECT_EQ(result[0], 42); +} + +TEST_F(CircularBufferTest, LargeOffsetWrapping) +{ + CircularBuffer buffer(testBuffer, 10); + + for (size_t i = 0; i < 10; ++i) + testBuffer[i] = static_cast(i); + + unsigned char readBuffer[3]; + std::span span(readBuffer, 3); + + // Very large offset that wraps multiple times + auto result = buffer.Read(1000005, 3, span); + + EXPECT_EQ(result.size(), 3); + EXPECT_EQ(result[0], 5); + EXPECT_EQ(result[1], 6); + EXPECT_EQ(result[2], 7); +} + +TEST_F(CircularBufferTest, FullBufferWriteAndRead) +{ + constexpr size_t bufferSize = 256; + unsigned char* fullBuffer = new unsigned char[bufferSize]; + CircularBuffer buffer(fullBuffer, bufferSize); + + // Write full buffer + std::vector writeData(bufferSize); + for (size_t i = 0; i < bufferSize; ++i) + writeData[i] = static_cast(i); + + buffer.Write(std::span(writeData), 0); + + // Read full buffer + std::vector readData(bufferSize); + auto result = buffer.Read(0, bufferSize, std::span(readData)); + + EXPECT_EQ(result.size(), bufferSize); + for (size_t i = 0; i < bufferSize; ++i) + EXPECT_EQ(result[i], writeData[i]); + + delete[] fullBuffer; +} + +TEST_F(CircularBufferTest, OverwritePreviousData) +{ + CircularBuffer buffer(testBuffer, 10); + + // First write + unsigned char data1[] = {1, 2, 3, 4, 5}; + buffer.Write(std::span(data1, 5), 0); + + // Overwrite with different data + unsigned char data2[] = {10, 20, 30}; + buffer.Write(std::span(data2, 3), 0); + + unsigned char readBuffer[5]; + auto result = buffer.Read(0, 5, std::span(readBuffer, 5)); + + EXPECT_EQ(result[0], 10); + EXPECT_EQ(result[1], 20); + EXPECT_EQ(result[2], 30); + EXPECT_EQ(result[3], 4); // Original data + EXPECT_EQ(result[4], 5); // Original data +} + +TEST_F(CircularBufferTest, AlternatingWriteAndClear) +{ + CircularBuffer buffer(testBuffer, 20); + + unsigned char data[] = {1, 2, 3, 4, 5}; + + buffer.Write(std::span(data, 5), 0); + buffer.Clear(0, 5); + + buffer.Write(std::span(data, 5), 5); + buffer.Clear(5, 5); + + buffer.Write(std::span(data, 5), 10); + + // Only the last write should remain + unsigned char readBuffer[20]; + auto result = buffer.Read(0, 20, std::span(readBuffer, 20)); + + for (size_t i = 0; i < 10; ++i) + EXPECT_EQ(result[i], 0); + + for (size_t i = 10; i < 15; ++i) + EXPECT_EQ(result[i], data[i - 10]); +} + +// ===== Regression Tests (from CircularBuffer bug fixes) ===== + +TEST_F(CircularBufferTest, SpanNotWrittenAsObject) +{ + // Regression test: Ensure std::span itself isn't written as an object + // This was a bug where the template overload was matching span objects + + CircularBuffer buffer(testBuffer, 20); + + unsigned char expectedData[] = {0xAA, 0xBB, 0xCC, 0xDD}; + std::span dataSpan(expectedData, 4); + + buffer.Write(dataSpan, 0); + + // Verify the actual data was written, not the span object + unsigned char readBuffer[4]; + auto result = buffer.Read(0, 4, std::span(readBuffer, 4)); + + EXPECT_EQ(result.size(), 4); + EXPECT_EQ(result[0], 0xAA); + EXPECT_EQ(result[1], 0xBB); + EXPECT_EQ(result[2], 0xCC); + EXPECT_EQ(result[3], 0xDD); + + // Verify we didn't write a span object by checking the next bytes are still zero + auto nextBytes = buffer.Read(4, 16, std::span(readBuffer, 4)); + for (size_t i = 0; i < std::min(size_t(4), nextBytes.size()); ++i) + EXPECT_EQ(nextBytes[i], 0); +} + +TEST_F(CircularBufferTest, OffsetHandlingConsistency) +{ + // Regression test: Ensure offset handling is consistent across operations + + CircularBuffer buffer(testBuffer, 10); + + unsigned char data[] = {1, 2, 3}; + + // Write at various offsets and verify wrapping is consistent + buffer.Write(std::span(data, 3), 0); + buffer.Write(std::span(data, 3), 10); + buffer.Write(std::span(data, 3), 20); + + // All writes should have gone to offset 0 due to wrapping + unsigned char readBuffer[3]; + auto result = buffer.Read(0, 3, std::span(readBuffer, 3)); + + for (size_t i = 0; i < 3; ++i) + EXPECT_EQ(result[i], data[i]); +} + +// ===== Additional Data-Driven Tests ===== +// These tests use parameterized test data to comprehensively validate behavior + +class CircularBufferDataDrivenTests : public ::testing::Test { +protected: + // Test data similar to C# tests + static const std::vector ByteArray; + static const std::vector ByteArray1; + static const std::vector ByteArray2; + static const std::vector ByteArray3; +}; + +const std::vector CircularBufferDataDrivenTests::ByteArray = {100, 110, 120}; +const std::vector CircularBufferDataDrivenTests::ByteArray1 = {100}; +const std::vector CircularBufferDataDrivenTests::ByteArray2 = {100, 110}; +const std::vector CircularBufferDataDrivenTests::ByteArray3 = {100, 110, 120}; + +TEST_F(CircularBufferDataDrivenTests, ClearWithVariousOffsetsAndLengths) { + // Test data: {offset, length} + struct TestCase { + unsigned long long offset; + unsigned long long length; + }; + + std::vector testCases = { + {0, 0}, {0, 1}, {1, 1}, {2, 1}, {3, 1}, + {0, 2}, {1, 2}, {2, 2}, {3, 2}, + {0, 3}, {1, 3}, {2, 3}, {3, 3} + }; + + for (const auto& testCase : testCases) { + std::vector buffer = {1, 1, 1}; // Initialize with 1s + CircularBuffer circularBuffer(buffer.data(), buffer.size()); + + // Verify all bytes are initially 1 + if (testCase.length > 0) { + std::vector initialBuffer(testCase.length); + auto initialSpan = circularBuffer.Read(testCase.offset, testCase.length, std::span(initialBuffer)); + for (auto byte : initialSpan) { + EXPECT_EQ(byte, 1); + } + + // Clear the specified range + circularBuffer.Clear(testCase.offset, testCase.length); + + // Verify all bytes in the range are now 0 + std::vector clearedBuffer(testCase.length); + auto clearedSpan = circularBuffer.Read(testCase.offset, testCase.length, std::span(clearedBuffer)); + for (auto byte : clearedSpan) { + EXPECT_EQ(byte, 0); + } + } + } +} diff --git a/src/Interprocess.Native.Static.Tests/Interprocess.Native.Static.Tests.vcxproj b/src/Interprocess.Native.Static.Tests/Interprocess.Native.Static.Tests.vcxproj new file mode 100644 index 0000000..22af5e8 --- /dev/null +++ b/src/Interprocess.Native.Static.Tests/Interprocess.Native.Static.Tests.vcxproj @@ -0,0 +1,138 @@ + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + {3a5b0009-eaae-4c4b-ba73-30e58d3dbc87} + Win32Proj + 10.0.26100.0 + Application + v143 + Unicode + + + + + + + + + + + Use + pch.h + Disabled + WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) + EnableFastChecks + MultiThreadedDebug + Level3 + ..\Interprocess.Native.Static;%(AdditionalIncludeDirectories) + stdcpp20 + + + true + Console + + + + + Use + pch.h + Disabled + X64;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) + EnableFastChecks + MultiThreadedDebug + Level3 + ..\Interprocess.Native.Static;%(AdditionalIncludeDirectories) + stdcpp20 + + + true + Console + + + + + Use + pch.h + WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + MultiThreaded + Level3 + ProgramDatabase + ..\Interprocess.Native.Static;%(AdditionalIncludeDirectories) + stdcpp20 + + + true + Console + true + true + + + + + Use + pch.h + X64;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + MultiThreaded + Level3 + ProgramDatabase + ..\Interprocess.Native.Static;%(AdditionalIncludeDirectories) + stdcpp20 + + + true + Console + true + true + + + + + + + + + + + Create + Create + Create + Create + + + + + {a31ea597-838e-4445-953a-4cbdb01519f2} + + + + + + + + + + + + This project references NuGet package(s) that are missing on this computer. Use NuGet Package Restore to download them. For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}. + + + + \ No newline at end of file diff --git a/src/Interprocess.Native.Static.Tests/QueueTests.cpp b/src/Interprocess.Native.Static.Tests/QueueTests.cpp new file mode 100644 index 0000000..2d54df6 --- /dev/null +++ b/src/Interprocess.Native.Static.Tests/QueueTests.cpp @@ -0,0 +1,837 @@ +//****************************************************************************** +// Queue Comprehensive Test Suite +//****************************************************************************** +// +// Purpose: Comprehensive testing of Queue, Publisher, and Subscriber functionality +// to ensure data integrity, capacity management, and cross-instance behavior. +// +// Test Categories: +// - Basic Operations: Single message, empty queue, sequential messages +// - Capacity Management: Small messages, capacity limits, queue filling +// - Circular Wrapping: 100-iteration wrap-around with data integrity +// - Data Integrity: All byte values (0-255), alignment preservation +// - Variable Sizes: 1-256 bytes with alignment boundaries +// - Multiple Instances: Shared publishers/subscribers, isolation +// - High Frequency: 10000 rapid operations with queue dynamics +// - Cross-Instance: Memory-mapped file consistency, offset synchronization +// - Queue Components: Header structure, options validation +// +// Key Tests: +// - DataIntegrityAllByteValues: All 256 byte values preserved +// - CircularBufferWrapping: 100 iterations, 50-byte messages +// - VaryingMessageSizes: 1-256 bytes including alignment boundaries +// - HighFrequencyOperations: 10000 messages with dynamic filling +// - CrossInstanceQueueStateConsistency: Multi-instance state preservation +// +// Usage: +// All queue tests: --gtest_filter="*Queue*.*" +// Main suite: --gtest_filter="QueueTest.*" +// Allocation: --gtest_filter="QueueAllocationTestFixture.*" +// +//****************************************************************************** + +#include "pch.h" +#include "QueueFactory.h" +#include "QueueOptions.h" +#include "IPublisher.h" +#include "ISubscriber.h" +#include "Queue.h" +#include +#include +#include +#include +#include + +using namespace Cloudtoid::Interprocess; + +class QueueTest : public ::testing::Test +{ +protected: + void SetUp() override + { + // Generate unique queue name for each test to ensure isolation + queueName = L"test-queue-" + std::to_wstring( + std::chrono::system_clock::now().time_since_epoch().count()); + } + + std::unique_ptr CreatePublisher(unsigned long long capacity = 1024) + { + QueueOptions options(queueName, capacity); + QueueFactory factory; + return std::unique_ptr(factory.CreatePublisher(options)); + } + + std::unique_ptr CreateSubscriber(unsigned long long capacity = 1024) + { + QueueOptions options(queueName, capacity); + QueueFactory factory; + return std::unique_ptr(factory.CreateSubscriber(options)); + } + + std::wstring queueName; +}; + +// ===== Basic Enqueue/Dequeue Tests ===== + +TEST_F(QueueTest, EnqueueAndDequeueSingleMessage) +{ + auto publisher = CreatePublisher(1024); + auto subscriber = CreateSubscriber(1024); + + unsigned char sendData[] = {1, 2, 3, 4, 5}; + std::span sendSpan(sendData, 5); + + ASSERT_TRUE(publisher->TryEnqueue(sendSpan)); + + unsigned char receiveBuffer[10]; + std::span receiveSpan(receiveBuffer, 10); + std::span message; + + ASSERT_TRUE(subscriber->TryDequeue(receiveSpan, message)); + EXPECT_EQ(message.size(), 5); + for (size_t i = 0; i < 5; ++i) + EXPECT_EQ(message[i], sendData[i]); +} + +TEST_F(QueueTest, DequeueFromEmptyQueueReturnsFalse) +{ + // Test both with and without publisher to ensure no garbage data + auto publisher = CreatePublisher(1024); + auto subscriber = CreateSubscriber(1024); + + unsigned char receiveBuffer[10]; + std::span receiveSpan(receiveBuffer, 10); + std::span message; + + // Without enqueueing anything, verify dequeue returns false and message is empty + EXPECT_FALSE(subscriber->TryDequeue(receiveSpan, message)); + EXPECT_TRUE(message.empty()); +} + +TEST_F(QueueTest, SequentialMessages) +{ + auto publisher = CreatePublisher(2048); // Need larger capacity for many messages + auto subscriber = CreateSubscriber(2048); + + // Send messages with sequential values 0-99 + for (int i = 0; i < 100; ++i) + { + unsigned char value = static_cast(i); + ASSERT_TRUE(publisher->TryEnqueue(std::span(&value, 1))) + << "Failed to enqueue message " << i; + } + + // Verify sequential values + unsigned char receiveBuffer[10]; + std::span receiveSpan(receiveBuffer, 10); + std::span message; + + for (int i = 0; i < 100; ++i) + { + ASSERT_TRUE(subscriber->TryDequeue(receiveSpan, message)) + << "Failed to dequeue message " << i; + ASSERT_EQ(message.size(), 1) << "Wrong message size at " << i; + EXPECT_EQ(message[0], static_cast(i)) + << "Wrong value at message " << i; + } +} + +// ===== Capacity Tests ===== + +TEST_F(QueueTest, LargeNumberOfSmallMessages) +{ + // Test queue behavior with many small messages + auto publisher = CreatePublisher(4096); + auto subscriber = CreateSubscriber(4096); + + // Enqueue many small messages + int enqueueCount = 0; + for (int i = 0; i < 500; ++i) + { + unsigned char value = static_cast(i % 256); + if (publisher->TryEnqueue(std::span(&value, 1))) + enqueueCount++; + else + break; // Queue full + } + + EXPECT_GT(enqueueCount, 100) << "Should be able to queue at least 100 small messages"; + + // Dequeue and verify + unsigned char receiveBuffer[10]; + std::span message; + int dequeueCount = 0; + + for (int i = 0; i < enqueueCount; ++i) + { + ASSERT_TRUE(subscriber->TryDequeue(std::span(receiveBuffer, 10), message)) + << "Failed to dequeue message " << i; + EXPECT_EQ(message.size(), 1); + EXPECT_EQ(message[0], static_cast(i % 256)); + dequeueCount++; + } + + EXPECT_EQ(dequeueCount, enqueueCount) << "Should dequeue exactly as many as enqueued"; +} + +TEST_F(QueueTest, CapacityRespected) +{ + // Verify queue capacity is finite + auto publisher = CreatePublisher(512); + + unsigned char data[32]; + std::fill_n(data, 32, 0xFF); + + int successfulEnqueues = 0; + // Try to fill beyond capacity + for (int i = 0; i < 100; ++i) + { + if (!publisher->TryEnqueue(std::span(data, 32))) + break; + successfulEnqueues++; + } + + // Should have failed before 100 iterations (queue has finite capacity) + EXPECT_LT(successfulEnqueues, 100) << "Queue should eventually fill up"; + EXPECT_GT(successfulEnqueues, 0) << "Should be able to enqueue at least one message"; +} + +// ===== Circular Buffer Wrapping & Data Integrity Tests ===== + +TEST_F(QueueTest, CircularBufferWrapping) +{ + auto publisher = CreatePublisher(1024); + auto subscriber = CreateSubscriber(1024); + + unsigned char data[50]; + for (size_t i = 0; i < 50; ++i) + data[i] = static_cast(i); + + // Enqueue and dequeue many times to force wrapping + for (int iteration = 0; iteration < 100; ++iteration) + { + ASSERT_TRUE(publisher->TryEnqueue(std::span(data, 50))) + << "Failed to enqueue at iteration " << iteration; + + unsigned char receiveBuffer[50]; + std::span receiveSpan(receiveBuffer, 50); + std::span message; + + ASSERT_TRUE(subscriber->TryDequeue(receiveSpan, message)) + << "Failed to dequeue at iteration " << iteration; + + ASSERT_EQ(message.size(), 50) << "Wrong size at iteration " << iteration; + + for (size_t i = 0; i < 50; ++i) + { + EXPECT_EQ(message[i], data[i]) + << "Data mismatch at iteration " << iteration << ", byte " << i; + } + } +} + +TEST_F(QueueTest, DataIntegrityAllByteValues) +{ + auto publisher = CreatePublisher(2048); + auto subscriber = CreateSubscriber(2048); + + // Send all possible byte values + unsigned char allBytes[256]; + for (int i = 0; i < 256; ++i) + allBytes[i] = static_cast(i); + + ASSERT_TRUE(publisher->TryEnqueue(std::span(allBytes, 256))); + + unsigned char receiveBuffer[256]; + std::span receiveSpan(receiveBuffer, 256); + std::span message; + + ASSERT_TRUE(subscriber->TryDequeue(receiveSpan, message)); + EXPECT_EQ(message.size(), 256); + + for (int i = 0; i < 256; ++i) + EXPECT_EQ(message[i], static_cast(i)) + << "Mismatch at byte " << i; +} + +// ===== Variable Message Size Tests ===== + +TEST_F(QueueTest, VaryingMessageSizes) +{ + auto publisher = CreatePublisher(4096); + auto subscriber = CreateSubscriber(4096); + + // Test with different message sizes including single-byte and alignment boundaries + std::vector sizes = {1, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128, 255, 256}; + + for (size_t size : sizes) + { + std::vector data(size); + for (size_t i = 0; i < size; ++i) + data[i] = static_cast((i + size) % 256); + + ASSERT_TRUE(publisher->TryEnqueue(std::span(data.data(), size))) + << "Failed to enqueue message of size " << size; + } + + // Dequeue and verify + unsigned char receiveBuffer[512]; + std::span receiveSpan(receiveBuffer, 512); + std::span message; + + for (size_t size : sizes) + { + ASSERT_TRUE(subscriber->TryDequeue(receiveSpan, message)) + << "Failed to dequeue message of size " << size; + ASSERT_EQ(message.size(), size) + << "Wrong message size (expected " << size << ")"; + + for (size_t i = 0; i < size; ++i) + { + EXPECT_EQ(message[i], static_cast((i + size) % 256)) + << "Mismatch at position " << i << " for message size " << size; + } + } +} + +TEST_F(QueueTest, MaximumMessageSize) +{ + size_t capacity = 4096; + auto publisher = CreatePublisher(capacity); + auto subscriber = CreateSubscriber(capacity); + + // Create message that's close to capacity (accounting for header overhead) + size_t msgSize = capacity - 64; // Leave room for headers + std::vector data(msgSize); + for (size_t i = 0; i < msgSize; ++i) + data[i] = static_cast(i % 256); + + ASSERT_TRUE(publisher->TryEnqueue(std::span(data.data(), msgSize))); + + std::vector receiveBuffer(msgSize + 100); + std::span receiveSpan(receiveBuffer.data(), receiveBuffer.size()); + std::span message; + + ASSERT_TRUE(subscriber->TryDequeue(receiveSpan, message)); + EXPECT_EQ(message.size(), msgSize); + + for (size_t i = 0; i < msgSize; ++i) + EXPECT_EQ(message[i], static_cast(i % 256)); +} + +// ===== Multiple Publisher/Subscriber Tests ===== + +TEST_F(QueueTest, MultipleSubscribersShareReadPosition) +{ + // Multiple subscribers share the same read position (queue is FIFO, not broadcast) + auto publisher = CreatePublisher(1024); + auto subscriber1 = CreateSubscriber(1024); + auto subscriber2 = CreateSubscriber(1024); + + unsigned char data1[] = {10, 20, 30, 40}; + unsigned char data2[] = {50, 60, 70, 80}; + + ASSERT_TRUE(publisher->TryEnqueue(std::span(data1, 4))); + ASSERT_TRUE(publisher->TryEnqueue(std::span(data2, 4))); + + unsigned char receiveBuffer1[10], receiveBuffer2[10]; + std::span message1, message2; + + // Subscriber1 reads first message + ASSERT_TRUE(subscriber1->TryDequeue(std::span(receiveBuffer1, 10), message1)); + EXPECT_EQ(message1.size(), 4); + for (size_t i = 0; i < 4; ++i) + EXPECT_EQ(message1[i], data1[i]); + + // Subscriber2 should get the second message (not the first again) + ASSERT_TRUE(subscriber2->TryDequeue(std::span(receiveBuffer2, 10), message2)); + EXPECT_EQ(message2.size(), 4); + for (size_t i = 0; i < 4; ++i) + EXPECT_EQ(message2[i], data2[i]) << "Second subscriber should get second message"; +} + +TEST_F(QueueTest, MultiplePublishersCanEnqueue) +{ + auto publisher1 = CreatePublisher(1024); + auto publisher2 = CreatePublisher(1024); + auto subscriber = CreateSubscriber(1024); + + unsigned char data1[] = {1, 2, 3}; + unsigned char data2[] = {4, 5, 6}; + + ASSERT_TRUE(publisher1->TryEnqueue(std::span(data1, 3))); + ASSERT_TRUE(publisher2->TryEnqueue(std::span(data2, 3))); + + unsigned char receiveBuffer[10]; + std::span message; + + // Should be able to dequeue both messages + ASSERT_TRUE(subscriber->TryDequeue(std::span(receiveBuffer, 10), message)); + EXPECT_EQ(message.size(), 3); + + ASSERT_TRUE(subscriber->TryDequeue(std::span(receiveBuffer, 10), message)); + EXPECT_EQ(message.size(), 3); +} + +// ===== Buffer Boundary Tests ===== + +TEST_F(QueueTest, MessageAtExactBufferBoundary) +{ + size_t capacity = 128; + auto publisher = CreatePublisher(capacity); + auto subscriber = CreateSubscriber(capacity); + + // Fill buffer exactly to boundary + unsigned char data[64]; + std::fill_n(data, 64, 0xAA); + + ASSERT_TRUE(publisher->TryEnqueue(std::span(data, 64))); + + unsigned char receiveBuffer[64]; + std::span message; + + ASSERT_TRUE(subscriber->TryDequeue(std::span(receiveBuffer, 64), message)); + EXPECT_EQ(message.size(), 64); + + for (size_t i = 0; i < 64; ++i) + EXPECT_EQ(message[i], 0xAA); +} + +// ===== Edge Case & High Frequency Tests ===== + +TEST_F(QueueTest, HighFrequencyOperations) +{ + auto publisher = CreatePublisher(2048); + auto subscriber = CreateSubscriber(2048); + + const int messageCount = 10000; + + // Enqueue many messages rapidly + for (int i = 0; i < messageCount; ++i) + { + unsigned char value = static_cast(i % 256); + while (!publisher->TryEnqueue(std::span(&value, 1))) + { + // If full, dequeue one to make space + unsigned char receiveBuffer[1]; + std::span message; + subscriber->TryDequeue(std::span(receiveBuffer, 1), message); + } + } + + // Dequeue and verify + unsigned char receiveBuffer[1]; + std::span message; + int dequeueCount = 0; + + while (subscriber->TryDequeue(std::span(receiveBuffer, 1), message)) + { + EXPECT_EQ(message.size(), 1); + dequeueCount++; + } + + EXPECT_GT(dequeueCount, 0) << "Should have dequeued some messages"; +} + +// ===== QUEUE HEADER TESTS ===== + +TEST(QueueHeaderTests, IsEmpty) +{ + QueueHeader header; + header.ReadOffset = 0; + header.WriteOffset = 0; + EXPECT_TRUE(header.IsEmpty()); + + header.WriteOffset = 8; + EXPECT_FALSE(header.IsEmpty()); + + header.ReadOffset = 8; + EXPECT_TRUE(header.IsEmpty()); +} + +TEST(QueueHeaderTests, SizeAndLayout) +{ + // Verify the actual QueueHeader structure + EXPECT_EQ(sizeof(QueueHeader), 32); + EXPECT_EQ(offsetof(QueueHeader, ReadOffset), 0); + EXPECT_EQ(offsetof(QueueHeader, WriteOffset), 8); + EXPECT_EQ(offsetof(QueueHeader, ReadLockTimeStamp), 16); + EXPECT_EQ(offsetof(QueueHeader, Reserved), 24); +} + +// ===== QUEUE OPTIONS VALIDATION TESTS ===== + +TEST(QueueOptionsTests, ValidatesCapacity) +{ + // Test that capacity must be at least 16 bytes + EXPECT_THROW(QueueOptions(L"test", 8), std::invalid_argument); + EXPECT_THROW(QueueOptions(L"test", 15), std::invalid_argument); + + // Test that capacity must be a multiple of 8 + EXPECT_THROW(QueueOptions(L"test", 17), std::invalid_argument); + EXPECT_THROW(QueueOptions(L"test", 23), std::invalid_argument); + + // Test that empty queue name is not allowed + EXPECT_THROW(QueueOptions(L"", 64), std::invalid_argument); + + // Test valid options + EXPECT_NO_THROW(QueueOptions(L"valid-queue", 64)); + EXPECT_NO_THROW(QueueOptions(L"valid-queue", 1024)); +} + +TEST(QueueOptionsTests, StoresValuesCorrectly) +{ + QueueOptions options(L"test-queue", 1024); + EXPECT_EQ(options.GetQueueName(), L"test-queue"); + EXPECT_EQ(options.GetCapacity(), 1024ULL); +} + +// ===== QUEUE ALLOCATION AND CROSS-INSTANCE TESTS ===== + +namespace QueueAllocationTests +{ + class QueueAllocationTestFixture : public ::testing::Test + { + protected: + std::string GenerateUniqueQueueName(const std::string& prefix = "allocation_test") + { + auto timestamp = std::chrono::duration_cast( + std::chrono::steady_clock::now().time_since_epoch()).count(); + return prefix + "_" + std::to_string(timestamp); + } + + QueueOptions CreateOptions(const std::string& queueName, size_t capacity = 1024 * 1024) + { + // Convert std::string to std::wstring + std::wstring wQueueName(queueName.begin(), queueName.end()); + return QueueOptions(wQueueName, capacity); + } + }; + + // Test 1: Verify same queue header is accessed by multiple Publishers + TEST_F(QueueAllocationTestFixture, MultiplePublishersSameQueueHeaderAccess) + { + auto queueName = GenerateUniqueQueueName("multi_pub"); + auto options = CreateOptions(queueName); + + QueueFactory factory; + + // Create first publisher + std::unique_ptr publisher1(factory.CreatePublisher(options)); + + // Send a message to advance WriteOffset + unsigned char testData = 123; + std::span message(&testData, 1); + ASSERT_TRUE(publisher1->TryEnqueue(message)) << "First publisher should be able to send"; + + // Create second publisher with same queue name + std::unique_ptr publisher2(factory.CreatePublisher(options)); + + // Second publisher should see the updated WriteOffset from first publisher + // Try to send another message - this should work if they share the same queue + unsigned char testData2 = 124; + std::span message2(&testData2, 1); + EXPECT_TRUE(publisher2->TryEnqueue(message2)) << "Second publisher should access same queue"; + + // Verify with subscriber that both messages are there + std::unique_ptr subscriber(factory.CreateSubscriber(options)); + + std::vector buffer(1024); + std::span receivedMessage; + + // Should receive first message + ASSERT_TRUE(subscriber->TryDequeue(buffer, receivedMessage)); + EXPECT_EQ(receivedMessage.size(), 1); + + // Should receive second message + ASSERT_TRUE(subscriber->TryDequeue(buffer, receivedMessage)); + EXPECT_EQ(receivedMessage.size(), 1); + } + + // Test 2: Verify same queue header is accessed by multiple Subscribers + TEST_F(QueueAllocationTestFixture, MultipleSubscribersSameQueueHeaderAccess) + { + auto queueName = GenerateUniqueQueueName("multi_sub"); + auto options = CreateOptions(queueName); + + QueueFactory factory; + + // Send multiple messages + std::unique_ptr publisher(factory.CreatePublisher(options)); + + std::vector testData = { 100, 101, 102 }; + for (auto data : testData) + { + std::span message(&data, 1); + ASSERT_TRUE(publisher->TryEnqueue(message)); + } + + // Create first subscriber and consume one message + std::unique_ptr subscriber1(factory.CreateSubscriber(options)); + + std::vector buffer(1024); + std::span receivedMessage; + + ASSERT_TRUE(subscriber1->TryDequeue(buffer, receivedMessage)); + EXPECT_EQ(receivedMessage.size(), 1); + + // Create second subscriber - should see updated ReadOffset + std::unique_ptr subscriber2(factory.CreateSubscriber(options)); + + // Second subscriber should get the second message (not the first one again) + ASSERT_TRUE(subscriber2->TryDequeue(buffer, receivedMessage)); + EXPECT_EQ(receivedMessage.size(), 1); + + // Third message should still be available + EXPECT_TRUE(subscriber2->TryDequeue(buffer, receivedMessage)); + EXPECT_EQ(receivedMessage.size(), 1); + } + + // Test 3: Publisher-Subscriber queue offset synchronization + TEST_F(QueueAllocationTestFixture, PublisherSubscriberOffsetSynchronization) + { + auto queueName = GenerateUniqueQueueName("sync_test"); + auto options = CreateOptions(queueName); + + QueueFactory factory; + + // Create publisher and subscriber simultaneously + std::unique_ptr publisher(factory.CreatePublisher(options)); + std::unique_ptr subscriber(factory.CreateSubscriber(options)); + + std::vector buffer(1024); + std::span receivedMessage; + + // Queue should be empty initially + EXPECT_FALSE(subscriber->TryDequeue(buffer, receivedMessage)) << "Empty queue should return false"; + + // Send a sequence of messages and verify each one + for (int i = 0; i < 10; ++i) + { + unsigned char testValue = static_cast(50 + i); // 50, 51, 52, ... + std::span message(&testValue, 1); + + ASSERT_TRUE(publisher->TryEnqueue(message)) << "Should be able to send message " << i; + + // Immediately try to receive it + ASSERT_TRUE(subscriber->TryDequeue(buffer, receivedMessage)) << "Should be able to receive message " << i; + ASSERT_EQ(receivedMessage.size(), 1) << "Should receive exactly 1 byte"; + EXPECT_EQ(receivedMessage[0], testValue) << "Message " << i << " data should match. Expected: " + << static_cast(testValue) << ", Got: " << static_cast(receivedMessage[0]); + } + } + + // Test 4: Memory-mapped file consistency across instances + TEST_F(QueueAllocationTestFixture, MemoryMappedFileConsistency) + { + auto queueName = GenerateUniqueQueueName("mmf_consistency"); + auto options = CreateOptions(queueName); + + QueueFactory factory; + + // Pattern: Write with one instance, read with another, repeat + std::vector sentValues; + std::vector receivedValues; + + for (int iteration = 0; iteration < 5; ++iteration) + { + // Create new publisher instance each time + std::unique_ptr publisher(factory.CreatePublisher(options)); + + unsigned char testValue = static_cast(70 + iteration); + sentValues.push_back(testValue); + std::span message(&testValue, 1); + + ASSERT_TRUE(publisher->TryEnqueue(message)) << "Iteration " << iteration << " send failed"; + + // Create new subscriber instance each time + std::unique_ptr subscriber(factory.CreateSubscriber(options)); + + std::vector buffer(1024); + std::span receivedMessage; + + ASSERT_TRUE(subscriber->TryDequeue(buffer, receivedMessage)) << "Iteration " << iteration << " receive failed"; + ASSERT_EQ(receivedMessage.size(), 1); + + receivedValues.push_back(receivedMessage[0]); + + std::cout << "Iteration " << iteration << " - Sent: " << static_cast(testValue) + << ", Received: " << static_cast(receivedMessage[0]) << std::endl; + } + + // Verify all values match + ASSERT_EQ(sentValues.size(), receivedValues.size()); + for (size_t i = 0; i < sentValues.size(); ++i) + { + EXPECT_EQ(sentValues[i], receivedValues[i]) << "Mismatch at iteration " << i + << " - Expected: " << static_cast(sentValues[i]) + << ", Got: " << static_cast(receivedValues[i]); + } + } + + // Test 5: Queue name isolation - different names should be different queues + TEST_F(QueueAllocationTestFixture, QueueNameIsolation) + { + auto queueName1 = GenerateUniqueQueueName("isolation1"); + auto queueName2 = GenerateUniqueQueueName("isolation2"); + + auto options1 = CreateOptions(queueName1); + auto options2 = CreateOptions(queueName2); + + QueueFactory factory; + + // Create publishers for different queues + std::unique_ptr publisher1(factory.CreatePublisher(options1)); + std::unique_ptr publisher2(factory.CreatePublisher(options2)); + + // Send different values to each queue + unsigned char value1 = 200; + unsigned char value2 = 201; + + std::span message1(&value1, 1); + std::span message2(&value2, 1); + + ASSERT_TRUE(publisher1->TryEnqueue(message1)); + ASSERT_TRUE(publisher2->TryEnqueue(message2)); + + // Create subscribers for each queue + std::unique_ptr subscriber1(factory.CreateSubscriber(options1)); + std::unique_ptr subscriber2(factory.CreateSubscriber(options2)); + + std::vector buffer(1024); + std::span receivedMessage; + + // Each subscriber should only see messages from its own queue + ASSERT_TRUE(subscriber1->TryDequeue(buffer, receivedMessage)); + EXPECT_EQ(receivedMessage[0], value1) << "Queue 1 should receive its own message"; + + ASSERT_TRUE(subscriber2->TryDequeue(buffer, receivedMessage)); + EXPECT_EQ(receivedMessage[0], value2) << "Queue 2 should receive its own message"; + + // Queues should be empty now + EXPECT_FALSE(subscriber1->TryDequeue(buffer, receivedMessage)) << "Queue 1 should be empty"; + EXPECT_FALSE(subscriber2->TryDequeue(buffer, receivedMessage)) << "Queue 2 should be empty"; + } + + // Test 6: Detect the odd/even pattern issue + TEST_F(QueueAllocationTestFixture, OddEvenPatternDetection) + { + auto queueName = GenerateUniqueQueueName("odd_even"); + auto options = CreateOptions(queueName); + + QueueFactory factory; + std::unique_ptr publisher(factory.CreatePublisher(options)); + std::unique_ptr subscriber(factory.CreateSubscriber(options)); + + std::vector buffer(1024); + std::span receivedMessage; + + // Test pattern: send odd numbers, see what we get back + std::vector oddNumbers = { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39, 41, 43, 45, 47, 49 }; + std::vector receivedNumbers; + + for (auto oddValue : oddNumbers) + { + std::span message(&oddValue, 1); + + ASSERT_TRUE(publisher->TryEnqueue(message)) << "Failed to send odd number: " << static_cast(oddValue); + ASSERT_TRUE(subscriber->TryDequeue(buffer, receivedMessage)) << "Failed to receive message for odd number: " << static_cast(oddValue); + ASSERT_EQ(receivedMessage.size(), 1); + + receivedNumbers.push_back(receivedMessage[0]); + + std::cout << "Sent odd: " << static_cast(oddValue) + << " (0x" << std::hex << static_cast(oddValue) << ")" + << ", Received: " << std::dec << static_cast(receivedMessage[0]) + << " (0x" << std::hex << static_cast(receivedMessage[0]) << ")" + << ", Parity: " << ((receivedMessage[0] % 2 == 0) ? "EVEN" : "ODD") << std::dec << std::endl; + } + + // Analyze the pattern + int oddReceived = 0; + int evenReceived = 0; + + for (size_t i = 0; i < oddNumbers.size(); ++i) + { + if (receivedNumbers[i] % 2 == 0) + evenReceived++; + else + oddReceived++; + + // The received number should match the sent number + EXPECT_EQ(receivedNumbers[i], oddNumbers[i]) << "Mismatch at index " << i + << " - Sent: " << static_cast(oddNumbers[i]) + << ", Received: " << static_cast(receivedNumbers[i]); + } + + std::cout << "Pattern analysis: Sent " << oddNumbers.size() << " odd numbers, " + << "Received " << oddReceived << " odd, " << evenReceived << " even" << std::endl; + + // All received numbers should be odd (matching what we sent) + EXPECT_EQ(oddReceived, oddNumbers.size()) << "All received numbers should be odd since we sent odd numbers"; + EXPECT_EQ(evenReceived, 0) << "No even numbers should be received when sending odd numbers"; + } + + // Test 7: Cross-instance queue state consistency + TEST_F(QueueAllocationTestFixture, CrossInstanceQueueStateConsistency) + { + auto queueName = GenerateUniqueQueueName("state_consistency"); + auto options = CreateOptions(queueName); + + QueueFactory factory; + + // Fill queue with a single publisher to avoid instance lifecycle issues + std::vector allSentValues; + + // Use a single publisher instance for all messages + std::unique_ptr publisher(factory.CreatePublisher(options)); + + for (int pubIndex = 0; pubIndex < 3; ++pubIndex) + { + for (int msgIndex = 0; msgIndex < 5; ++msgIndex) + { + unsigned char value = static_cast(100 + (pubIndex * 10) + msgIndex); + allSentValues.push_back(value); + + std::span message(&value, 1); + ASSERT_TRUE(publisher->TryEnqueue(message)) << "Publisher batch " << pubIndex << ", message " << msgIndex; + } + } + + // Drain queue with a single subscriber to match the single publisher pattern + std::vector allReceivedValues; + + // Use a single subscriber instance for all messages + std::unique_ptr subscriber(factory.CreateSubscriber(options)); + + for (int subIndex = 0; subIndex < 3; ++subIndex) + { + for (int msgIndex = 0; msgIndex < 5; ++msgIndex) + { + std::vector buffer(1024); + std::span receivedMessage; + + ASSERT_TRUE(subscriber->TryDequeue(buffer, receivedMessage)) << "Batch " << subIndex << ", message " << msgIndex; + ASSERT_EQ(receivedMessage.size(), 1); + + allReceivedValues.push_back(receivedMessage[0]); + } + } + + // Verify order and values are preserved + ASSERT_EQ(allSentValues.size(), allReceivedValues.size()); + + for (size_t i = 0; i < allSentValues.size(); ++i) + { + EXPECT_EQ(allSentValues[i], allReceivedValues[i]) << "Message order/value mismatch at position " << i + << " - Expected: " << static_cast(allSentValues[i]) + << ", Got: " << static_cast(allReceivedValues[i]); + } + + // Queue should be empty now + std::unique_ptr finalSubscriber(factory.CreateSubscriber(options)); + std::vector buffer(1024); + std::span receivedMessage; + EXPECT_FALSE(finalSubscriber->TryDequeue(buffer, receivedMessage)) << "Queue should be empty after draining"; + } +} \ No newline at end of file diff --git a/src/Interprocess.Native.Static.Tests/SemaphoreTests.cpp b/src/Interprocess.Native.Static.Tests/SemaphoreTests.cpp new file mode 100644 index 0000000..f787a56 --- /dev/null +++ b/src/Interprocess.Native.Static.Tests/SemaphoreTests.cpp @@ -0,0 +1,272 @@ +//****************************************************************************** +// Semaphore Test Suite +//****************************************************************************** +// +// Purpose: Testing the SemaphoreWindows implementation for cross-process +// synchronization using Windows semaphore primitives. +// +// Test Categories: +// - Basic Operations: Creation, release, wait +// - Cross-Thread: Multi-threaded signaling and waiting +// - Timeout Behavior: Wait with timeouts +// - Error Handling: Invalid operations +// +//****************************************************************************** + +#include "pch.h" +#include "SemaphoreWindows.h" +#include +#include +#include + +using namespace Cloudtoid::Interprocess::Semaphore::Windows; + +class SemaphoreTests : public ::testing::Test +{ +protected: + std::wstring GenerateUniqueName() + { + auto timestamp = std::chrono::duration_cast( + std::chrono::steady_clock::now().time_since_epoch()).count(); + return L"sem_test_" + std::to_wstring(timestamp); + } +}; + +// ===== BASIC OPERATIONS ===== + +TEST_F(SemaphoreTests, CanCreateSemaphore) +{ + auto name = GenerateUniqueName(); + + // Should not throw + EXPECT_NO_THROW({ + SemaphoreWindows sem(name); + }); +} + +TEST_F(SemaphoreTests, ReleaseIncrementsSemaphore) +{ + auto name = GenerateUniqueName(); + SemaphoreWindows sem(name); + + // Release should succeed + EXPECT_NO_THROW(sem.Release()); + + // Should be able to wait immediately (non-blocking) since we released + EXPECT_TRUE(sem.Wait(0)) << "Wait should succeed immediately after Release"; +} + +TEST_F(SemaphoreTests, WaitWithoutReleaseTimesOut) +{ + auto name = GenerateUniqueName(); + SemaphoreWindows sem(name); + + // Wait with short timeout should fail (nothing released) + EXPECT_FALSE(sem.Wait(10)) << "Wait should timeout when no Release has been called"; +} + +TEST_F(SemaphoreTests, MultipleReleasesAllowMultipleWaits) +{ + auto name = GenerateUniqueName(); + SemaphoreWindows sem(name); + + // Release 3 times + sem.Release(); + sem.Release(); + sem.Release(); + + // Should be able to wait 3 times without blocking + EXPECT_TRUE(sem.Wait(0)) << "First wait should succeed"; + EXPECT_TRUE(sem.Wait(0)) << "Second wait should succeed"; + EXPECT_TRUE(sem.Wait(0)) << "Third wait should succeed"; + + // Fourth wait should timeout + EXPECT_FALSE(sem.Wait(10)) << "Fourth wait should timeout"; +} + +// ===== CROSS-THREAD SYNCHRONIZATION ===== + +TEST_F(SemaphoreTests, CrossThreadSignaling) +{ + auto name = GenerateUniqueName(); + SemaphoreWindows sem(name); + + bool threadCompleted = false; + + // Start thread that waits for signal + std::thread waiter([&]() { + // Wait for up to 5 seconds + bool signaled = sem.Wait(5000); + EXPECT_TRUE(signaled) << "Thread should receive signal"; + threadCompleted = true; + }); + + // Give thread time to start waiting + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + + // Release semaphore to signal the waiting thread + sem.Release(); + + // Wait for thread to complete + waiter.join(); + + EXPECT_TRUE(threadCompleted) << "Thread should have completed"; +} + +TEST_F(SemaphoreTests, ProducerConsumerPattern) +{ + auto name = GenerateUniqueName(); + SemaphoreWindows sem(name); + + const int itemCount = 5; + std::atomic itemsConsumed{0}; + + // Consumer thread + std::thread consumer([&]() { + for (int i = 0; i < itemCount; ++i) + { + // Wait for producer to signal (up to 5 seconds per item) + bool received = sem.Wait(5000); + EXPECT_TRUE(received) << "Consumer should receive signal for item " << i; + if (received) + { + itemsConsumed++; + } + } + }); + + // Producer thread + std::thread producer([&]() { + for (int i = 0; i < itemCount; ++i) + { + // Simulate work + std::this_thread::sleep_for(std::chrono::milliseconds(10)); + + // Signal consumer that item is ready + sem.Release(); + } + }); + + producer.join(); + consumer.join(); + + EXPECT_EQ(itemsConsumed, itemCount) << "All items should be consumed"; +} + +TEST_F(SemaphoreTests, MultipleThreadsWaitingForSignal) +{ + auto name = GenerateUniqueName(); + SemaphoreWindows sem(name); + + const int threadCount = 3; + std::atomic threadsCompleted{0}; + std::vector threads; + + // Start multiple waiting threads + for (int i = 0; i < threadCount; ++i) + { + threads.emplace_back([&]() { + bool signaled = sem.Wait(5000); + EXPECT_TRUE(signaled); + if (signaled) + { + threadsCompleted++; + } + }); + } + + // Give threads time to start waiting + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + + // Release semaphore multiple times to wake all threads + for (int i = 0; i < threadCount; ++i) + { + sem.Release(); + std::this_thread::sleep_for(std::chrono::milliseconds(10)); + } + + // Wait for all threads + for (auto& thread : threads) + { + thread.join(); + } + + EXPECT_EQ(threadsCompleted, threadCount) << "All threads should complete"; +} + +// ===== TIMEOUT BEHAVIOR ===== + +TEST_F(SemaphoreTests, WaitTimeoutIsAccurate) +{ + auto name = GenerateUniqueName(); + SemaphoreWindows sem(name); + + // Measure how long a 100ms timeout actually takes + auto start = std::chrono::steady_clock::now(); + bool result = sem.Wait(100); + auto duration = std::chrono::steady_clock::now() - start; + auto ms = std::chrono::duration_cast(duration).count(); + + EXPECT_FALSE(result) << "Wait should timeout"; + EXPECT_GE(ms, 90) << "Timeout should be at least 90ms"; + EXPECT_LE(ms, 200) << "Timeout should be no more than 200ms (allowing for scheduling)"; +} + +TEST_F(SemaphoreTests, ZeroTimeoutIsNonBlocking) +{ + auto name = GenerateUniqueName(); + SemaphoreWindows sem(name); + + // Zero timeout should return immediately + auto start = std::chrono::steady_clock::now(); + bool result = sem.Wait(0); + auto duration = std::chrono::steady_clock::now() - start; + auto ms = std::chrono::duration_cast(duration).count(); + + EXPECT_FALSE(result) << "Wait should fail immediately"; + EXPECT_LT(ms, 10) << "Wait(0) should return in less than 10ms"; +} + +// ===== CROSS-INSTANCE BEHAVIOR ===== + +TEST_F(SemaphoreTests, MultipleSemaphoreInstancesShareState) +{ + auto name = GenerateUniqueName(); + + // Create two instances with same name + SemaphoreWindows sem1(name); + SemaphoreWindows sem2(name); + + // Release on first instance + sem1.Release(); + + // Wait on second instance should succeed + EXPECT_TRUE(sem2.Wait(100)) << "Second instance should see release from first instance"; + + // Another wait should timeout (only one release) + EXPECT_FALSE(sem2.Wait(10)) << "Should timeout after consuming the single release"; +} + +TEST_F(SemaphoreTests, SemaphoreResetWhenAllHandlesClosed) +{ + auto name = GenerateUniqueName(); + + // Create and release in first instance + { + SemaphoreWindows sem(name); + sem.Release(); + sem.Release(); + } // Instance destroyed - semaphore is destroyed when last handle closes + + // Create new instance with same name - creates NEW semaphore + { + SemaphoreWindows sem(name); + + // New semaphore should be in initial state (count = 0) + EXPECT_FALSE(sem.Wait(10)) << "New semaphore should start at count 0"; + + // Release and verify it works + sem.Release(); + EXPECT_TRUE(sem.Wait(0)) << "Should be able to wait after releasing new semaphore"; + } +} \ No newline at end of file diff --git a/src/Interprocess.Native.Static.Tests/packages.config b/src/Interprocess.Native.Static.Tests/packages.config new file mode 100644 index 0000000..a71561e --- /dev/null +++ b/src/Interprocess.Native.Static.Tests/packages.config @@ -0,0 +1,4 @@ + + + + \ No newline at end of file diff --git a/src/Interprocess.Native.Static.Tests/pch.cpp b/src/Interprocess.Native.Static.Tests/pch.cpp new file mode 100644 index 0000000..250fb27 --- /dev/null +++ b/src/Interprocess.Native.Static.Tests/pch.cpp @@ -0,0 +1,5 @@ +// +// pch.cpp +// + +#include "pch.h" diff --git a/src/Interprocess.Native.Static.Tests/pch.h b/src/Interprocess.Native.Static.Tests/pch.h new file mode 100644 index 0000000..0572a70 --- /dev/null +++ b/src/Interprocess.Native.Static.Tests/pch.h @@ -0,0 +1,7 @@ +// +// pch.h +// + +#pragma once + +#include "gtest/gtest.h" diff --git a/src/Interprocess.Native.Static/CircularBuffer.h b/src/Interprocess.Native.Static/CircularBuffer.h new file mode 100644 index 0000000..f37fd67 --- /dev/null +++ b/src/Interprocess.Native.Static/CircularBuffer.h @@ -0,0 +1,122 @@ +#pragma once + +#include +#include + +namespace Cloudtoid::Interprocess +{ + class CircularBuffer + { + public: + CircularBuffer(unsigned char* buffer, const unsigned long long capacity) : + _capacity{capacity}, + _buffer{buffer} + { + } + + [[nodiscard]] + unsigned long long GetCapacity() const + { + return _capacity; + } + + [[nodiscard]] + unsigned char* GetPointer(const unsigned long long offset) const + { + const auto adjustedOffset = AdjustedOffset(offset); + return _buffer + adjustedOffset; + } + + [[nodiscard]] + std::span Read(const unsigned long long offset, + unsigned long long length, + const std::span resultBuffer) const + { + if (length == 0) + { + return std::span{}; // empty + } + + auto result = resultBuffer; + length = std::min(length, result.size()); + + const auto adjustedOffset = AdjustedOffset(offset); + + const auto resultBufferPtr = result.data(); + const auto sourcePtr = _buffer + adjustedOffset; + + const auto rightLength = std::min(_capacity - adjustedOffset, length); + if (rightLength > 0) + { + std::copy_n(sourcePtr, rightLength, resultBufferPtr); + } + + const auto leftLength = length - rightLength; + if (leftLength > 0) + { + std::copy_n(_buffer, leftLength, resultBufferPtr + rightLength); + } + + return result.subspan(0, length); + } + + void Write(const std::span source, const unsigned long long offset) const + { + Write(source.data(), source.size(), offset); + } + + template + requires (!std::is_same_v, std::span>) + void Write(const T& source, const unsigned long long offset) + { + static_assert(std::is_trivially_copyable_v, "T must be trivially copyable"); + Write(reinterpret_cast(&source), sizeof(T), offset); + } + + void Clear(const unsigned long long offset, const unsigned long long length) const + { + if (length == 0) + { + return; + } + + const auto adjustedOffset = AdjustedOffset(offset); + const auto rightLength = std::min(_capacity - adjustedOffset, length); + std::memset(_buffer + adjustedOffset, 0, rightLength); + + const auto leftLength = length - rightLength; + if (leftLength > 0) + { + std::memset(_buffer, 0, leftLength); + } + } + + [[nodiscard]] + unsigned long long AdjustedOffset(const unsigned long long offset) const + { + return offset % _capacity; + } + + void Write(const unsigned char* source, const unsigned long long length, const unsigned long long offset) const + { + if (length == 0) + { + return; + } + + const auto adjustedOffset = AdjustedOffset(offset); + const auto rightLength = std::min(_capacity - adjustedOffset, length); + std::copy_n(source, rightLength, _buffer + adjustedOffset); + + const auto leftLength = length - rightLength; + if (leftLength > 0) + { + std::copy_n(source + rightLength, leftLength, _buffer); + } + } + + private: + unsigned long long _capacity; + unsigned char* _buffer; + }; +} // namespace Cloudtoid::Interprocess diff --git a/src/Interprocess.Native.Static/IInterprocessSemaphoreReleaser.h b/src/Interprocess.Native.Static/IInterprocessSemaphoreReleaser.h new file mode 100644 index 0000000..73ee9dd --- /dev/null +++ b/src/Interprocess.Native.Static/IInterprocessSemaphoreReleaser.h @@ -0,0 +1,20 @@ +#pragma once + +namespace Cloudtoid::Interprocess +{ + class IInterprocessSemaphoreReleaser + { + public: + virtual ~IInterprocessSemaphoreReleaser() = default; + + IInterprocessSemaphoreReleaser(const IInterprocessSemaphoreReleaser&) = default; + IInterprocessSemaphoreReleaser& operator=(const IInterprocessSemaphoreReleaser&) = default; + IInterprocessSemaphoreReleaser(IInterprocessSemaphoreReleaser&&) = default; + IInterprocessSemaphoreReleaser& operator=(IInterprocessSemaphoreReleaser&&) = default; + + virtual void Release() = 0; + + protected: + IInterprocessSemaphoreReleaser() = default; + }; +} // namespace Cloudtoid::Interprocess diff --git a/src/Interprocess.Native.Static/IInterprocessSemaphoreWaiter.h b/src/Interprocess.Native.Static/IInterprocessSemaphoreWaiter.h new file mode 100644 index 0000000..6fb0cfe --- /dev/null +++ b/src/Interprocess.Native.Static/IInterprocessSemaphoreWaiter.h @@ -0,0 +1,20 @@ +#pragma once + +namespace Cloudtoid::Interprocess +{ + class IInterprocessSemaphoreWaiter + { + public: + virtual ~IInterprocessSemaphoreWaiter() = default; + + IInterprocessSemaphoreWaiter(const IInterprocessSemaphoreWaiter&) = default; + IInterprocessSemaphoreWaiter& operator=(const IInterprocessSemaphoreWaiter&) = default; + IInterprocessSemaphoreWaiter(IInterprocessSemaphoreWaiter&&) = default; + IInterprocessSemaphoreWaiter& operator=(IInterprocessSemaphoreWaiter&&) = default; + + virtual bool Wait(int millisecondTimeout) = 0; + + protected: + IInterprocessSemaphoreWaiter() = default; + }; +} // namespace Cloudtoid::Interprocess diff --git a/src/Interprocess.Native.Static/IMemoryFile.h b/src/Interprocess.Native.Static/IMemoryFile.h new file mode 100644 index 0000000..9abcca5 --- /dev/null +++ b/src/Interprocess.Native.Static/IMemoryFile.h @@ -0,0 +1,27 @@ +#pragma once + +namespace Cloudtoid::Interprocess +{ + class IMemoryFile + { + public: + virtual ~IMemoryFile() = 0; + IMemoryFile(const IMemoryFile&) = delete; + IMemoryFile& operator=(const IMemoryFile&) = delete; + IMemoryFile(IMemoryFile&&) = default; + IMemoryFile& operator=(IMemoryFile&&) = default; + + [[nodiscard]] + void* GetMappedFile() const noexcept + { + return _mappedFile; + } + + protected: + IMemoryFile() = default; + void* _mappedFile = nullptr; + }; + + // Even pure virtual destructors need an implementation + inline IMemoryFile::~IMemoryFile() = default; +} // namespace Cloudtoid::Interprocess diff --git a/src/Interprocess.Native.Static/IPublisher.h b/src/Interprocess.Native.Static/IPublisher.h new file mode 100644 index 0000000..51dd7f2 --- /dev/null +++ b/src/Interprocess.Native.Static/IPublisher.h @@ -0,0 +1,24 @@ +#pragma once + +#include + +namespace Cloudtoid::Interprocess +{ + /// + /// Message publisher that publishes messages to the subscribers. + /// + class IPublisher + { + protected: + IPublisher() = default; + public: + virtual ~IPublisher() = default; + IPublisher(const IPublisher&) = default; + IPublisher& operator=(const IPublisher&) = default; + IPublisher(IPublisher&&) = default; + IPublisher& operator=(IPublisher&&) = default; + + /// Enqueues the message to be published to the subscribers. + virtual bool TryEnqueue(std::span message) = 0; + }; +} // namespace Cloudtoid::Interprocess diff --git a/src/Interprocess.Native.Static/IQueueFactory.h b/src/Interprocess.Native.Static/IQueueFactory.h new file mode 100644 index 0000000..cdcf11a --- /dev/null +++ b/src/Interprocess.Native.Static/IQueueFactory.h @@ -0,0 +1,19 @@ +#pragma once + +#include "IPublisher.h" +#include "ISubscriber.h" +#include "QueueOptions.h" + +namespace Cloudtoid::Interprocess +{ + /// Factory to create queue publishers and subscribers. + class IQueueFactory + { + public: + virtual ~IQueueFactory() = default; + /// Creates a queue message publisher. + virtual IPublisher* CreatePublisher(const QueueOptions& options) = 0; + /// Creates a queue message subscriber. + virtual ISubscriber* CreateSubscriber(const QueueOptions& options) = 0; + }; +} // namespace Cloudtoid::Interprocess diff --git a/src/Interprocess.Native.Static/ISubscriber.h b/src/Interprocess.Native.Static/ISubscriber.h new file mode 100644 index 0000000..87e591c --- /dev/null +++ b/src/Interprocess.Native.Static/ISubscriber.h @@ -0,0 +1,47 @@ +#pragma once + +#include +#include + +namespace Cloudtoid::Interprocess +{ + /// + /// Message subscriber that subscribes to the messages published by the publisher. + /// + class ISubscriber + { + protected: + ISubscriber() = default; + public: + virtual ~ISubscriber() = default; + ISubscriber(const ISubscriber&) = default; + ISubscriber& operator=(const ISubscriber&) = default; + ISubscriber(ISubscriber&&) = default; + ISubscriber& operator=(ISubscriber&&) = default; + + /// + /// Dequeues a message from the queue if the queue is not empty. This is a non-blocking + /// call and returns immediately. This method does not allocated memory and only populates + /// the that is passed in. Make sure that the buffer is large + /// enough to receive the entire message, or the message is truncated to fit the buffer. + /// + /// The memory buffer that is populated with the message. Make sure + /// that the buffer is large enough to receive the entire message, or the message is + /// truncated to fit the buffer. + /// The dequeued message. + /// Returns if the queue is empty. + virtual bool TryDequeue(std::span buffer, std::span& message) = 0; + + /// + /// Dequeues a message from the queue. If the queue is empty, it *waits* for the + /// arrival of a new message. This call is blocking until a message is received. + /// This method does not allocated memory and only populates + /// the that is passed in. Make sure that the buffer is large + /// enough to receive the entire message, or the message is truncated to fit the buffer. + /// + /// The memory buffer that is populated with the message. Make sure + /// that the buffer is large enough to receive the entire message, or the message is + /// truncated to fit the buffer. + virtual std::span Dequeue(std::span buffer) = 0; + }; +} // namespace Cloudtoid::Interprocess diff --git a/src/Interprocess.Native.Static/Interprocess.Native.Static.vcxproj b/src/Interprocess.Native.Static/Interprocess.Native.Static.vcxproj new file mode 100644 index 0000000..5dc548c --- /dev/null +++ b/src/Interprocess.Native.Static/Interprocess.Native.Static.vcxproj @@ -0,0 +1,153 @@ + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + Level4 + true + + + 17.0 + Win32Proj + {a31ea597-838e-4445-953a-4cbdb01519f2} + InterprocessNativeStatic + 10.0 + + + + Application + true + v143 + Unicode + + + Application + false + v143 + true + Unicode + + + Application + true + v143 + Unicode + + + Application + false + v143 + true + Unicode + + + + + + + + + + + + + + + + + + + + + + Level3 + true + WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + + + Console + true + + + + + Level3 + true + true + true + WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + + + Console + true + + + + + Level3 + true + _DEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + stdcpp20 + stdc17 + + + Console + true + + + + + Level3 + true + true + true + NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + + + Console + true + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/src/Interprocess.Native.Static/Interprocess.Native.Static.vcxproj.filters b/src/Interprocess.Native.Static/Interprocess.Native.Static.vcxproj.filters new file mode 100644 index 0000000..0525a67 --- /dev/null +++ b/src/Interprocess.Native.Static/Interprocess.Native.Static.vcxproj.filters @@ -0,0 +1,70 @@ + + + + + {a3b9cc81-9829-43ec-9f56-d73d05f77281} + + + {255df030-8fc8-48d9-ad0a-bbe2629a3d38} + + + {527ec378-4923-48c3-a4f5-bd4c3c0a2a76} + + + {e09731cf-d57d-4fe7-ac31-f939109ff6ad} + + + + + Contracts + + + Contracts + + + Contracts + + + Contracts + + + Contracts + + + Memory + + + Memory + + + Memory + + + Memory + + + Queue + + + Queue + + + Queue + + + Semaphore + + + Semaphore + + + Semaphore + + + Semaphore + + + Contracts + + + \ No newline at end of file diff --git a/src/Interprocess.Native.Static/InterprocessSemaphore.h b/src/Interprocess.Native.Static/InterprocessSemaphore.h new file mode 100644 index 0000000..d8abad3 --- /dev/null +++ b/src/Interprocess.Native.Static/InterprocessSemaphore.h @@ -0,0 +1,27 @@ +#pragma once +#include + +#include "IInterprocessSemaphoreReleaser.h" +#include "IInterprocessSemaphoreWaiter.h" +#include "SemaphoreWindows.h" + +namespace Cloudtoid::Interprocess +{ + /// + /// This class opens or creates platform agnostic named semaphore. Named + /// semaphores are synchronization constructs accessible across processes. + /// + class InterprocessSemaphore + { + public: + static IInterprocessSemaphoreReleaser* CreateReleaser(const std::wstring& name) + { + return new Semaphore::Windows::SemaphoreWindows(name); + } + + static IInterprocessSemaphoreWaiter* CreateWaiter(const std::wstring& name) + { + return new Semaphore::Windows::SemaphoreWindows(name); + } + }; +} // namespace Cloudtoid::Interprocess diff --git a/src/Interprocess.Native.Static/MemoryFileWindows.h b/src/Interprocess.Native.Static/MemoryFileWindows.h new file mode 100644 index 0000000..d0f16b5 --- /dev/null +++ b/src/Interprocess.Native.Static/MemoryFileWindows.h @@ -0,0 +1,96 @@ +#pragma once +#include + +#include "IMemoryFile.h" +#include "QueueOptions.h" + +#define NOMINMAX +#include + +namespace Cloudtoid::Interprocess::Memory::Windows +{ + class MemoryFileWindows final : public IMemoryFile + { + const wchar_t* _mapNamePrefix = L"CT_IP_"; + + [[nodiscard]] + static HANDLE CreateOrOpenCore(unsigned long long capacity, const wchar_t* name) + { + const auto capacityHigh = static_cast(capacity >> 32); + const auto capacityLow = static_cast(capacity & 0xFFFFFFFF); + + // Try to create a new memory-mapped file + auto handle = CreateFileMappingW( + INVALID_HANDLE_VALUE, + nullptr, + PAGE_READWRITE, + capacityHigh, + capacityLow, + name + ); + + // CRITICAL: Save GetLastError() immediately after CreateFileMappingW + // because subsequent operations may overwrite it + auto lastError = GetLastError(); + + if (handle != nullptr && handle != INVALID_HANDLE_VALUE) + { + // Check if this is a newly created file vs existing one + bool isNewFile = (lastError != ERROR_ALREADY_EXISTS); + + if (isNewFile) + { + // For newly created memory-mapped files, ensure they are zero-initialized + // Map the entire file to zero it out + void* view = MapViewOfFile(handle, FILE_MAP_WRITE, 0, 0, 0); + if (view != nullptr) + { + // Zero out the entire memory-mapped file + ZeroMemory(view, static_cast(capacity)); + UnmapViewOfFile(view); + } + } + + return handle; + } + + auto error = GetLastError(); + if (error != ERROR_ACCESS_DENIED) + { + throw std::system_error(static_cast(error), std::system_category()); + } + + // Try to open existing file mapping + handle = OpenFileMappingW(PAGE_READWRITE, FALSE, name); + + if (handle != nullptr && handle != INVALID_HANDLE_VALUE) + { + return handle; + } + + error = GetLastError(); + throw std::system_error(static_cast(error), std::system_category()); + } + + public: + ~MemoryFileWindows() override + { + if (_mappedFile != nullptr && _mappedFile != INVALID_HANDLE_VALUE) + { + CloseHandle(_mappedFile); + _mappedFile = nullptr; + } + } + + explicit MemoryFileWindows(const QueueOptions& options) + { + const auto queueName = std::format(L"{}{}", _mapNamePrefix, options.GetQueueName()); + _mappedFile = CreateOrOpenCore(options.GetQueueStorageSize(), queueName.data()); + } + + MemoryFileWindows(const MemoryFileWindows&) = delete; + MemoryFileWindows& operator=(const MemoryFileWindows&) = delete; + MemoryFileWindows(MemoryFileWindows&&) = default; + MemoryFileWindows& operator=(MemoryFileWindows&&) = default; + }; +} // namespace Cloudtoid::Interprocess diff --git a/src/Interprocess.Native.Static/MemoryView.h b/src/Interprocess.Native.Static/MemoryView.h new file mode 100644 index 0000000..b4c8dca --- /dev/null +++ b/src/Interprocess.Native.Static/MemoryView.h @@ -0,0 +1,57 @@ +#pragma once +#include "IMemoryFile.h" +#include "MemoryFileWindows.h" + +namespace Cloudtoid::Interprocess +{ + // This class manages the underlying Memory Mapped File + class MemoryView + { + IMemoryFile* _file; + void* _view; + + public: + ~MemoryView() + { + if (_view != nullptr) + { + UnmapViewOfFile(_view); + _view = nullptr; + } + + if (_file != nullptr) + { + delete _file; + _file = nullptr; + } + } + + explicit MemoryView(const QueueOptions& options) + { + _file = new Memory::Windows::MemoryFileWindows(options); + + _view = MapViewOfFile( + _file->GetMappedFile(), + FILE_MAP_READ | FILE_MAP_WRITE, + 0, + 0, + 0); + + if (_view == nullptr) + { + const auto error = GetLastError(); + throw std::system_error(static_cast(error), std::system_category()); + } + } + + MemoryView(MemoryView& other) = delete; + MemoryView& operator=(MemoryView& other) = delete; + MemoryView(MemoryView&& other) = default; + MemoryView& operator=(MemoryView&& other) = default; + + unsigned char* Pointer() const noexcept + { + return static_cast(_view); + } + }; +} // namespace Cloudtoid::Interprocess diff --git a/src/Interprocess.Native.Static/MessageHeader.h b/src/Interprocess.Native.Static/MessageHeader.h new file mode 100644 index 0000000..3286536 --- /dev/null +++ b/src/Interprocess.Native.Static/MessageHeader.h @@ -0,0 +1,30 @@ +#pragma once + +#include + +namespace Cloudtoid::Interprocess +{ + // We rely on this structure to fit in 64 bits. + // If you change the size of this, no longer many of the assumptions + // taken in this code are going to be valid. + class MessageHeader + { + public: + int State; + int BodyLength; + + static constexpr int LockedToBeConsumedState = 1; + static constexpr int ReadyToBeConsumedState = 2; + + MessageHeader(const int state, const int bodyLength) : + State{state}, + BodyLength{bodyLength} + { + } + }; + + // Assert exact same layout as the C# version + static_assert(sizeof(MessageHeader) == 8, "The MessageHeader must be 8-bytes"); + static_assert(offsetof(MessageHeader, State) == 0, "State must be at offset 0"); + static_assert(offsetof(MessageHeader, BodyLength) == 4, "BodyLength must be at offset 4"); +} // namespace Cloudtoid::Interprocess diff --git a/src/Interprocess.Native.Static/Publisher.h b/src/Interprocess.Native.Static/Publisher.h new file mode 100644 index 0000000..98d9973 --- /dev/null +++ b/src/Interprocess.Native.Static/Publisher.h @@ -0,0 +1,105 @@ +#pragma once + +#include "IInterprocessSemaphoreReleaser.h" +#include "InterprocessSemaphore.h" +#include "IPublisher.h" +#include "Queue.h" + +namespace Cloudtoid::Interprocess +{ + class Publisher final : public Queue, public IPublisher + { + IInterprocessSemaphoreReleaser* _signal; + + public: + explicit Publisher(const QueueOptions& options) : + Queue(options), + _signal(InterprocessSemaphore::CreateReleaser(options.GetQueueName())) + { + } + + ~Publisher() override + { + if (_signal != nullptr) + { + delete _signal; + _signal = nullptr; + } + } + + Publisher(const Publisher&) = delete; + Publisher& operator=(const Publisher&) = delete; + Publisher(Publisher&&) = default; + Publisher& operator=(Publisher&&) = default; + + bool TryEnqueue(std::span message) override + { + auto bodyLength = message.size(); + auto messageLength = GetPaddedMessageLength(bodyLength); + + while (true) + { + auto header = *GetHeader(); + + if (!CheckCapacity(header, messageLength)) + { + return false; + } + + auto writeOffset = header.WriteOffset; + auto newWriteOffset = SafeIncrementMessageOffset(writeOffset, messageLength); + + // try to atomically update the write-offset that is stored in the queue header + if (InterlockedCompareExchange(&GetHeader()->WriteOffset, newWriteOffset, writeOffset) == writeOffset) + { + // write the message body + _buffer->Write(message, GetMessageBodyOffset(writeOffset)); + + // write the message header + _buffer->Write(MessageHeader{MessageHeader::ReadyToBeConsumedState, static_cast(bodyLength)}, + writeOffset); + + // signal the next receiver that there is a new message in the queue + _signal->Release(); + return true; + } + } + } + + private: + bool CheckCapacity(const QueueHeader& header, unsigned long long messageLength) + { + if (messageLength > _buffer->GetCapacity()) + { + return false; + } + + if (header.IsEmpty()) + { + return true; // it is an empty queue + } + + auto readOffset = header.ReadOffset % _buffer->GetCapacity(); + auto writeOffset = header.WriteOffset % _buffer->GetCapacity(); + + if (readOffset == writeOffset) + { + return false; // queue is full + } + + if (readOffset < writeOffset) + { + if (messageLength > _buffer->GetCapacity() + readOffset - writeOffset) + { + return false; + } + } + else if (messageLength > readOffset - writeOffset) + { + return false; + } + + return true; + } + }; +} // namespace Cloudtoid::Interprocess diff --git a/src/Interprocess.Native.Static/Queue.h b/src/Interprocess.Native.Static/Queue.h new file mode 100644 index 0000000..7a301e5 --- /dev/null +++ b/src/Interprocess.Native.Static/Queue.h @@ -0,0 +1,65 @@ +#pragma once + +#include "CircularBuffer.h" +#include "MemoryView.h" +#include "MessageHeader.h" +#include "QueueHeader.h" +#include "QueueOptions.h" + +namespace Cloudtoid::Interprocess +{ + class Queue + { + MemoryView* _view; + + public: + explicit Queue(const QueueOptions& options) + { + _view = new MemoryView(options); + _buffer = new CircularBuffer(_view->Pointer() + sizeof(QueueHeader), options.GetCapacity()); + } + + virtual ~Queue() + { + if (_view != nullptr) + { + delete _view; + _view = nullptr; + } + } + + Queue(const Queue&) = default; + Queue& operator=(const Queue&) = default; + Queue(Queue&&) = default; + Queue& operator=(Queue&&) = default; + + [[nodiscard]] + QueueHeader* GetHeader() const + { + return reinterpret_cast(_view->Pointer()); + } + + protected: + CircularBuffer* _buffer; + + static unsigned long long GetMessageBodyOffset(const unsigned long long startOffset) + { + return startOffset + sizeof(MessageHeader); + } + + static unsigned long long GetPaddedMessageLength(const unsigned long long bodyLength) + { + const auto length = sizeof(MessageHeader) + bodyLength; + + // Round up to the closest integer divisible by 8. This will add the [padding] if one is needed. + return 8 * static_cast(std::ceil(static_cast(length) / 8.0)); + } + + [[nodiscard]] + unsigned long long SafeIncrementMessageOffset(const unsigned long long offset, + const unsigned long long increment) const + { + return (offset + increment) % (_buffer->GetCapacity() * 2); + } + }; +} // namespace Cloudtoid::Interprocess diff --git a/src/Interprocess.Native.Static/QueueFactory.h b/src/Interprocess.Native.Static/QueueFactory.h new file mode 100644 index 0000000..26a0487 --- /dev/null +++ b/src/Interprocess.Native.Static/QueueFactory.h @@ -0,0 +1,34 @@ +#pragma once + +#include "IQueueFactory.h" +#include "Publisher.h" +#include "Subscriber.h" + +namespace Cloudtoid::Interprocess +{ + // + class QueueFactory final : public IQueueFactory + { + public: + // + QueueFactory() = default; + ~QueueFactory() override = default; + QueueFactory(const QueueFactory&) = default; + QueueFactory& operator=(const QueueFactory&) = default; + QueueFactory(QueueFactory&&) = default; + QueueFactory& operator=(QueueFactory&&) = default; + + // + IPublisher* CreatePublisher(const QueueOptions& options) override + { + return new Publisher(options); + } + + // + ISubscriber* CreateSubscriber(const QueueOptions& options) override + { + return new Subscriber(options); + } + }; + static_assert(sizeof(void*) == 8, "64-bit architecture required"); +} // namespace Cloudtoid::Interprocess diff --git a/src/Interprocess.Native.Static/QueueHeader.h b/src/Interprocess.Native.Static/QueueHeader.h new file mode 100644 index 0000000..b9e2905 --- /dev/null +++ b/src/Interprocess.Native.Static/QueueHeader.h @@ -0,0 +1,42 @@ +#pragma once + +#include + +namespace Cloudtoid::Interprocess +{ + class QueueHeader + { + public: + /// + /// Where the next message could potentially be read + /// + unsigned long long ReadOffset; + + /// + /// Where the next message could potentially be written + /// + unsigned long long WriteOffset; + + /// + /// Time (ticks) at which the read lock was taken. It is set to zero if not lock + /// + unsigned long long ReadLockTimeStamp; + + /// + /// Not used and might be used in the future + /// + unsigned long long Reserved; + + bool IsEmpty() const noexcept + { + return ReadOffset == WriteOffset; + } + }; + + // Assert exact same layout as the C# version + static_assert(sizeof(QueueHeader) == 32, "The QueueHeader must be 32-bytes"); + static_assert(offsetof(QueueHeader, ReadOffset) == 0, "ReadOffset must be at offset 0"); + static_assert(offsetof(QueueHeader, WriteOffset) == 8, "WriteOffset must be at offset 8"); + static_assert(offsetof(QueueHeader, ReadLockTimeStamp) == 16, "ReadLockTimeStamp must be at offset 16"); + static_assert(offsetof(QueueHeader, Reserved) == 24, "Reserved must be at offset 24"); +} // namespace Cloudtoid::Interprocess diff --git a/src/Interprocess.Native.Static/QueueOptions.h b/src/Interprocess.Native.Static/QueueOptions.h new file mode 100644 index 0000000..a9a1620 --- /dev/null +++ b/src/Interprocess.Native.Static/QueueOptions.h @@ -0,0 +1,93 @@ +#pragma once + +#include +#include +#include + +#include "QueueHeader.h" + +namespace Cloudtoid::Interprocess +{ + /// The options to create a queue. + class QueueOptions + { + public: + /// + /// Initializes a new instance of the class. + /// + /// The unique name of the queue. + /// The maximum capacity of the queue in bytes. This should be at least 16 bytes long and in the multiples of 8 + QueueOptions(const std::wstring_view queueName, const unsigned long long capacity) : + QueueOptions(queueName, {}, capacity) + { + } + + /// + /// Initializes a new instance of the class. + /// + /// The unique name of the queue. + /// The path to the directory/folder in which the memory mapped and other files are stored in + /// The maximum capacity of the queue in bytes. This should be at least 16 bytes long and in the multiples of 8 + QueueOptions(const std::wstring_view queueName, const std::wstring_view path, const unsigned long long capacity) + { + if (queueName.empty()) + { + throw std::invalid_argument("queueName"); + } + _queueName = queueName; + + _path = path; + + if (capacity < 16) + { + throw std::invalid_argument("capacity"); + } + + if (capacity % 8 != 0) + { + throw std::invalid_argument("capacity must be a multiple of 8"); + } + _capacity = capacity; + } + + /// + /// Gets the unique name of the queue. + /// + [[nodiscard]] + const std::wstring& GetQueueName() const noexcept + { + return _queueName; + } + + /// + /// Gets the path to the directory/folder in which the memory mapped and other files are stored in. + /// + [[nodiscard]] + std::wstring_view GetPath() const noexcept + { + return _path; + } + + /// + /// Gets the size of the queue in bytes. This does NOT include the space needed for the queue header. + /// + [[nodiscard]] + unsigned long long GetCapacity() const noexcept + { + return _capacity; + } + + /// + /// Gets the full size of the queue that includes both the header and message sections + /// + unsigned long long GetQueueStorageSize() const noexcept + { + return sizeof(QueueHeader) + _capacity; + } + + private: + std::wstring _queueName; + std::wstring _path; + unsigned long long _capacity; + }; +} // namespace Cloudtoid::Interprocess diff --git a/src/Interprocess.Native.Static/SemaphoreWindows.h b/src/Interprocess.Native.Static/SemaphoreWindows.h new file mode 100644 index 0000000..0e66426 --- /dev/null +++ b/src/Interprocess.Native.Static/SemaphoreWindows.h @@ -0,0 +1,62 @@ +#pragma once + +#include +#include +#include + +#include "IInterprocessSemaphoreReleaser.h" +#include "IInterprocessSemaphoreWaiter.h" +#include "MemoryFileWindows.h" + +namespace Cloudtoid::Interprocess::Semaphore::Windows +{ + class SemaphoreWindows final : public IInterprocessSemaphoreReleaser, public IInterprocessSemaphoreWaiter + { + const wchar_t* _handleNamePrefix = L"Global\\CT.IP."; + HANDLE _handle; + + public: + explicit SemaphoreWindows(const std::wstring& name) + { + auto full_name = std::format(L"{}{}", _handleNamePrefix, name); + + _handle = CreateSemaphoreW( + nullptr, + 0, + std::numeric_limits::max(), + full_name.data()); + + if (_handle == nullptr || _handle == INVALID_HANDLE_VALUE) + { + throw std::system_error(static_cast(GetLastError()), std::system_category()); + } + } + + ~SemaphoreWindows() override + { + if (_handle != nullptr) + { + CloseHandle(_handle); + _handle = nullptr; + } + } + + SemaphoreWindows(const SemaphoreWindows&) = default; + SemaphoreWindows& operator=(const SemaphoreWindows&) = default; + SemaphoreWindows(SemaphoreWindows&&) = default; + SemaphoreWindows& operator=(SemaphoreWindows&&) = default; + + void Release() override + { + if (FALSE == ReleaseSemaphore(_handle, 1, nullptr)) + { + throw std::system_error(static_cast(GetLastError()), std::system_category()); + } + } + + bool Wait(const int millisecondTimeout) override + { + return WaitForSingleObject(_handle, millisecondTimeout) == WAIT_OBJECT_0; + } + }; +} // namespace Cloudtoid::Interprocess diff --git a/src/Interprocess.Native.Static/Subscriber.h b/src/Interprocess.Native.Static/Subscriber.h new file mode 100644 index 0000000..af81687 --- /dev/null +++ b/src/Interprocess.Native.Static/Subscriber.h @@ -0,0 +1,143 @@ +#pragma once + +#include +#include +#include +#include "IInterprocessSemaphoreWaiter.h" +#include "InterprocessSemaphore.h" +#include "ISubscriber.h" +#include "Queue.h" +#include "MessageHeader.h" + +namespace Cloudtoid::Interprocess +{ + class Subscriber final : public Queue, public ISubscriber + { + IInterprocessSemaphoreWaiter* _waiter; + + public: + explicit Subscriber(const QueueOptions& options) : + Queue(options), + _waiter(InterprocessSemaphore::CreateWaiter(options.GetQueueName())) + { + } + + ~Subscriber() override + { + if (_waiter != nullptr) + { + delete _waiter; + _waiter = nullptr; + } + } + + Subscriber(const Subscriber&) = delete; + Subscriber& operator=(const Subscriber&) = delete; + Subscriber(Subscriber&&) = default; + Subscriber& operator=(Subscriber&&) = default; + + bool TryDequeue(std::span buffer, std::span& message) override + { + try + { + auto* header = GetHeader(); + if (header == nullptr) + { + message = std::span(); + return false; + } + + // Check if there are any messages available + if (header->IsEmpty()) + { + message = std::span(); + return false; + } + + // Get the current read position + auto readPosition = header->ReadOffset; + + // Read the message header + std::vector headerBuffer(sizeof(MessageHeader)); + auto headerData = _buffer->Read(readPosition, sizeof(MessageHeader), std::span(headerBuffer)); + if (headerData.size() < sizeof(MessageHeader)) + { + message = std::span(); + return false; + } + + // Copy the header data to a MessageHeader struct + MessageHeader messageHeader(0, 0); + std::memcpy(&messageHeader, headerData.data(), sizeof(MessageHeader)); + + // Validate the message header state + if (messageHeader.State != MessageHeader::ReadyToBeConsumedState) + { + message = std::span(); + return false; + } + + // Calculate message body length + auto bodyLength = static_cast(messageHeader.BodyLength); + if (bodyLength == 0 || bodyLength > buffer.size()) + { + // Skip this message if it's empty or too large for the buffer + auto paddedLength = GetPaddedMessageLength(bodyLength); + + // Atomically update the read position + auto newReadOffset = SafeIncrementMessageOffset(readPosition, paddedLength); + InterlockedExchange(&header->ReadOffset, newReadOffset); + + message = std::span(); + return bodyLength > buffer.size(); // Return true if message was too large + } + + // Read the message body + auto bodyOffset = GetMessageBodyOffset(readPosition); + std::vector bodyBuffer(bodyLength); + auto bodyData = _buffer->Read(bodyOffset, bodyLength, std::span(bodyBuffer)); + if (bodyData.size() < bodyLength) + { + message = std::span(); + return false; + } + + // Copy the body data to the buffer + std::memcpy(buffer.data(), bodyData.data(), bodyLength); + + // Atomically update the read position + auto paddedLength = GetPaddedMessageLength(bodyLength); + auto newReadOffset = SafeIncrementMessageOffset(readPosition, paddedLength); + InterlockedExchange(&header->ReadOffset, newReadOffset); + + // Return the message span + message = std::span(buffer.data(), bodyLength); + return true; + } + catch (...) + { + message = std::span(); + return false; + } + } + + std::span Dequeue(std::span buffer) override + { + std::span message; + + // Keep trying until we get a message + while (true) + { + if (TryDequeue(buffer, message)) + { + return message; + } + + // Wait for a signal that a new message is available + // Use a timeout to avoid infinite blocking + constexpr int timeoutMs = 100; + _waiter->Wait(timeoutMs); + } + } + }; +} // namespace Cloudtoid::Interprocess \ No newline at end of file diff --git a/src/Interprocess.Tests/CircularBufferTests.cs b/src/Interprocess.Tests/CircularBufferTests.cs index a8df457..90212fd 100644 --- a/src/Interprocess.Tests/CircularBufferTests.cs +++ b/src/Interprocess.Tests/CircularBufferTests.cs @@ -145,4 +145,325 @@ public void CanZeroBlock(long offset, long length) buffer.Read(offset, length).ToArray().All(i => i == 0).Should().BeTrue(); } } + + [Fact] + public void WriteSpanWrapping() + { + // Test that spans wrap correctly across buffer boundary + var b = new byte[10]; + fixed (byte* ptr = &b[0]) + { + var buffer = new CircularBuffer(ptr, 10); + var data = new byte[] { 1, 2, 3, 4, 5 }; + + buffer.Write(data, 8); // Start near end, should wrap + + var result = buffer.Read(8, 5); + result.ToArray().Should().BeEquivalentTo(data, options => options.WithStrictOrdering()); + } + } + + [Fact] + public void WriteAtExactBoundary() + { + // Test writing at exact buffer boundary + var b = new byte[10]; + fixed (byte* ptr = &b[0]) + { + var buffer = new CircularBuffer(ptr, 10); + var data = new byte[] { 1, 2, 3 }; + + buffer.Write(data, 10); // Exactly at boundary, should wrap to 0 + + var result = buffer.Read(0, 3); + result.ToArray().Should().BeEquivalentTo(data, options => options.WithStrictOrdering()); + } + } + + [Fact] + public void ReadWrapping() + { + // Test that reads wrap correctly across buffer boundary + var b = new byte[10]; + fixed (byte* ptr = &b[0]) + { + var buffer = new CircularBuffer(ptr, 10); + + // Fill buffer with known values + for (int i = 0; i < 10; i++) + b[i] = (byte)(i + 1); + + // Read across boundary + var result = buffer.Read(8, 4); + result.ToArray().Should().BeEquivalentTo(new byte[] { 9, 10, 1, 2 }); + } + } + + [Fact] + public void ClearWrapping() + { + // Test that clear operations wrap correctly + var b = new byte[10]; + fixed (byte* ptr = &b[0]) + { + var buffer = new CircularBuffer(ptr, 10); + + // Fill with non-zero values + for (int i = 0; i < 10; i++) + b[i] = 0xFF; + + // Clear across boundary + buffer.Clear(8, 4); + + var result = buffer.Read(0, 10); + result.Span[0].Should().Be(0); // Wrapped from clear + result.Span[1].Should().Be(0); // Wrapped from clear + result.Span[2].Should().Be(0xFF); // Not cleared + result.Span[8].Should().Be(0); // Cleared + result.Span[9].Should().Be(0); // Cleared + } + } + + [Fact] + public void WriteAndReadRoundTripWrapping() + { + // Test write/read cycle with wrapping + var b = new byte[20]; + fixed (byte* ptr = &b[0]) + { + var buffer = new CircularBuffer(ptr, 20); + var data = new byte[] { 0xAA, 0xBB, 0xCC, 0xDD, 0xEE }; + + buffer.Write(data, 18); // Wraps at offset 20 + var result = buffer.Read(18, 5); + + result.ToArray().Should().BeEquivalentTo(data, options => options.WithStrictOrdering()); + } + } + + [Fact] + public void MultipleSequentialWrites() + { + // Test multiple sequential writes to ensure consistency + var b = new byte[30]; + fixed (byte* ptr = &b[0]) + { + var buffer = new CircularBuffer(ptr, 30); + + var data1 = new byte[] { 1, 2, 3 }; + var data2 = new byte[] { 4, 5, 6 }; + var data3 = new byte[] { 7, 8, 9 }; + + buffer.Write(data1, 0); + buffer.Write(data2, 3); + buffer.Write(data3, 6); + + var result = buffer.Read(0, 9); + result.ToArray().Should().BeEquivalentTo(new byte[] { 1, 2, 3, 4, 5, 6, 7, 8, 9 }); + } + } + + [Fact] + public void SingleByteBuffer() + { + // Edge case: buffer with capacity of 1 + var b = new byte[1]; + fixed (byte* ptr = &b[0]) + { + var buffer = new CircularBuffer(ptr, 1); + + byte[] data = [42]; + buffer.Write(data, 0); + + var result = buffer.Read(0, 1); + result.Span[0].Should().Be(42); + } + } + + [Fact] + public void LargeOffsetWrapping() + { + // Test very large offsets that wrap multiple times + var b = new byte[10]; + fixed (byte* ptr = &b[0]) + { + var buffer = new CircularBuffer(ptr, 10); + + // Fill with known pattern + for (int i = 0; i < 10; i++) + b[i] = (byte)i; + + // Very large offset: 1000005 % 10 = 5 + var result = buffer.Read(1000005, 3); + result.ToArray().Should().BeEquivalentTo(new byte[] { 5, 6, 7 }); + } + } + + [Fact] + public void FullBufferWriteAndRead() + { + // Test writing and reading entire buffer capacity + const int bufferSize = 256; + var b = new byte[bufferSize]; + fixed (byte* ptr = &b[0]) + { + var buffer = new CircularBuffer(ptr, bufferSize); + + var writeData = Enumerable.Range(0, bufferSize).Select(i => (byte)i).ToArray(); + buffer.Write(writeData, 0); + + var result = buffer.Read(0, bufferSize); + result.ToArray().Should().BeEquivalentTo(writeData, options => options.WithStrictOrdering()); + } + } + + [Fact] + public void OverwritePreviousData() + { + // Test that new writes correctly overwrite old data + var b = new byte[10]; + fixed (byte* ptr = &b[0]) + { + var buffer = new CircularBuffer(ptr, 10); + + var data1 = new byte[] { 1, 2, 3, 4, 5 }; + buffer.Write(data1, 0); + + var data2 = new byte[] { 10, 20, 30 }; + buffer.Write(data2, 0); + + var result = buffer.Read(0, 5); + result.Span[0].Should().Be(10); + result.Span[1].Should().Be(20); + result.Span[2].Should().Be(30); + result.Span[3].Should().Be(4); // Original data + result.Span[4].Should().Be(5); // Original data + } + } + + [Fact] + public void AlternatingWriteAndClear() + { + // Test alternating write and clear operations + var b = new byte[20]; + fixed (byte* ptr = &b[0]) + { + var buffer = new CircularBuffer(ptr, 20); + + var data = new byte[] { 1, 2, 3, 4, 5 }; + + buffer.Write(data, 0); + buffer.Clear(0, 5); + + buffer.Write(data, 5); + buffer.Clear(5, 5); + + buffer.Write(data, 10); + + var result = buffer.Read(0, 20); + + // First 10 bytes should be zero + for (int i = 0; i < 10; i++) + result.Span[i].Should().Be(0); + + // Last 5 bytes should have the data + for (int i = 10; i < 15; i++) + result.Span[i].Should().Be(data[i - 10]); + } + } + + [Fact] + public void DataIntegrityWithAllByteValues() + { + // Test that all 256 byte values are preserved correctly + const int bufferSize = 512; + var b = new byte[bufferSize]; + fixed (byte* ptr = &b[0]) + { + var buffer = new CircularBuffer(ptr, bufferSize); + + // Write all 256 byte values twice + var allBytes = Enumerable.Range(0, 256).Select(i => (byte)i).ToArray(); + var doubleBytes = allBytes.Concat(allBytes).ToArray(); + + buffer.Write(doubleBytes, 0); + var result = buffer.Read(0, 512); + + result.ToArray().Should().BeEquivalentTo(doubleBytes, options => options.WithStrictOrdering()); + } + } + + [Fact] + public void OffsetHandlingConsistency() + { + // Regression test: Ensure offset handling is consistent across operations + var b = new byte[10]; + fixed (byte* ptr = &b[0]) + { + var buffer = new CircularBuffer(ptr, 10); + + var data = new byte[] { 1, 2, 3 }; + + // Write at offsets that should all wrap to 0 + buffer.Write(data, 0); + buffer.Write(data, 10); + buffer.Write(data, 20); + + // All writes should have gone to offset 0 due to wrapping + var result = buffer.Read(0, 3); + result.ToArray().Should().BeEquivalentTo(data); + } + } + + [Fact] + public void ClearEntireBuffer() + { + // Test clearing the entire buffer + var b = new byte[50]; + fixed (byte* ptr = &b[0]) + { + var buffer = new CircularBuffer(ptr, 50); + + // Fill with non-zero values + for (int i = 0; i < 50; i++) + b[i] = 0xFF; + + buffer.Clear(0, 50); + + var result = buffer.Read(0, 50); + result.ToArray().All(x => x == 0).Should().BeTrue(); + } + } + + [Theory] + [InlineData(100)] + [InlineData(500)] + [InlineData(1000)] + public void StressTestManyOperations(int iterations) + { + // Stress test with many sequential operations + const int bufferSize = 128; + var b = new byte[bufferSize]; + fixed (byte* ptr = &b[0]) + { + var buffer = new CircularBuffer(ptr, bufferSize); + + for (int i = 0; i < iterations; i++) + { + int offset = i % bufferSize; +#pragma warning disable IDE0047 // Remove unnecessary parentheses + int length = ((i * 7) % 20) + 1; // Varying lengths 1-20 +#pragma warning restore IDE0047 + + var data = Enumerable.Range(0, length).Select(x => (byte)((i + x) % 256)).ToArray(); + buffer.Write(data, offset); + + var result = buffer.Read(offset, length); + result.ToArray().Should().BeEquivalentTo( + data, + options => options.WithStrictOrdering(), + $"Failed on iteration {i}"); + } + } + } } \ No newline at end of file diff --git a/src/Interprocess.Tests/QueueTests.cs b/src/Interprocess.Tests/QueueTests.cs index 3c51bd4..f65cd8b 100644 --- a/src/Interprocess.Tests/QueueTests.cs +++ b/src/Interprocess.Tests/QueueTests.cs @@ -256,6 +256,554 @@ public void CanRecoverIfPublisherCrashes() message.ToArray().Should().BeEquivalentTo(ByteArray1); } + // ===== Data Integrity Tests (inspired by C++ suite) ===== + + [Fact] + [TestBeforeAfter] + public void DataIntegrityAllByteValues() + { + // Test that all 256 possible byte values (0-255) are preserved correctly + // This protects against encoding/decoding issues during cross-language interop + using var p = CreatePublisher(1024 * 1024); + using var s = CreateSubscriber(1024 * 1024); + + var allBytes = Enumerable.Range(0, 256).Select(i => (byte)i).ToArray(); + + p.TryEnqueue(allBytes).Should().BeTrue(); + var received = s.Dequeue(default); + + received.Length.Should().Be(256); + received.ToArray().Should().BeEquivalentTo(allBytes, options => options.WithStrictOrdering()); + } + + [Theory] + [InlineData(7)] // Non-aligned size + [InlineData(8)] // Aligned to 8 bytes + [InlineData(9)] // Non-aligned size + [InlineData(15)] + [InlineData(16)] + [InlineData(17)] + [TestBeforeAfter] + public void MessageAlignmentPreserved(int messageSize) + { + // Test that messages of various sizes (aligned and non-aligned) are preserved correctly + // This catches bugs where alignment assumptions corrupt data + using var p = CreatePublisher(1024 * 1024); + using var s = CreateSubscriber(1024 * 1024); + + var message = Enumerable.Range(0, messageSize).Select(i => (byte)(i % 256)).ToArray(); + + p.TryEnqueue(message).Should().BeTrue(); + var received = s.Dequeue(default); + + received.Length.Should().Be(messageSize); + received.ToArray().Should().BeEquivalentTo(message, options => options.WithStrictOrdering()); + } + + [Fact] + [TestBeforeAfter] + public void OddEvenPatternDetection() + { + // Test that byte patterns are preserved exactly + // Helps catch bugs where data is corrupted during transmission + using var p = CreatePublisher(1024 * 1024); + using var s = CreateSubscriber(1024 * 1024); + + var pattern = new byte[100]; + for (int i = 0; i < pattern.Length; i++) + pattern[i] = (byte)(i % 2 == 0 ? 0xAA : 0x55); + + p.TryEnqueue(pattern).Should().BeTrue(); + var received = s.Dequeue(default); + + received.ToArray().Should().BeEquivalentTo(pattern, options => options.WithStrictOrdering()); + + // Verify pattern is intact + for (int i = 0; i < received.Length; i++) + { + received.Span[i].Should().Be( + (byte)(i % 2 == 0 ? 0xAA : 0x55), + $"Pattern corruption at index {i}"); + } + } + + // ===== Circular Buffer Wrapping Tests ===== + + [Fact] + [TestBeforeAfter] + public void CircularBufferWrapping() + { + // Test that the circular buffer correctly wraps around after many iterations + // This catches off-by-one errors in offset calculations + const int iterations = 100; + const int messageSize = 50; + using var p = CreatePublisher(1024); // Small buffer to force wrapping + using var s = CreateSubscriber(1024); + + for (int i = 0; i < iterations; i++) + { + var message = Enumerable.Range(0, messageSize).Select(j => (byte)((i + j) % 256)).ToArray(); + + p.TryEnqueue(message).Should().BeTrue($"Failed to enqueue on iteration {i}"); + var received = s.Dequeue(default); + + received.Length.Should().Be(messageSize, $"Wrong length on iteration {i}"); + received.ToArray().Should().BeEquivalentTo( + message, + options => options.WithStrictOrdering(), + $"Data corruption on iteration {i}"); + } + } + + // ===== Stress Tests ===== + + [Fact] + [TestBeforeAfter] + public void LargeNumberOfSmallMessages() + { + // Stress test with many small messages to catch capacity and wrapping issues + const int messageCount = 500; + const int messageSize = 10; + using var p = CreatePublisher(10 * 1024 * 1024); + using var s = CreateSubscriber(10 * 1024 * 1024); + + for (int i = 0; i < messageCount; i++) + { + var message = Enumerable.Range(0, messageSize).Select(j => (byte)((i + j) % 256)).ToArray(); + p.TryEnqueue(message).Should().BeTrue($"Failed to enqueue message {i}"); + } + + for (int i = 0; i < messageCount; i++) + { + var received = s.Dequeue(default); + received.Length.Should().Be(messageSize, $"Wrong length for message {i}"); + + var expected = Enumerable.Range(0, messageSize).Select(j => (byte)((i + j) % 256)).ToArray(); + received.ToArray().Should().BeEquivalentTo( + expected, + options => options.WithStrictOrdering(), + $"Data corruption in message {i}"); + } + } + + // ===== Edge Cases ===== + + [Fact] + [TestBeforeAfter] + public void MaximumMessageSize() + { + // Test large messages near capacity + const int capacity = 10 * 1024 * 1024; // 10MB + const int messageSize = 1024 * 1024; // 1MB + using var p = CreatePublisher(capacity); + using var s = CreateSubscriber(capacity); + + var message = new byte[messageSize]; + for (int i = 0; i < messageSize; i++) + message[i] = (byte)(i % 256); + + p.TryEnqueue(message).Should().BeTrue(); + var received = s.Dequeue(default); + + received.Length.Should().Be(messageSize); + received.ToArray().Should().BeEquivalentTo(message, options => options.WithStrictOrdering()); + } + + [Theory] + [InlineData(1, 256)] + [InlineData(7, 256)] + [InlineData(8, 256)] + [InlineData(16, 256)] + [InlineData(32, 256)] + [InlineData(64, 128)] + [InlineData(128, 64)] + [InlineData(256, 32)] + [TestBeforeAfter] + public void VaryingMessageSizes(int messageSize, int messageCount) + { + // Test various message size and count combinations + using var p = CreatePublisher(10 * 1024 * 1024); + using var s = CreateSubscriber(10 * 1024 * 1024); + + // Drain any leftover messages from previous test iterations + while (s.TryDequeue(default, out _)) + { + // Keep draining until empty + } + + for (int i = 0; i < messageCount; i++) + { + var message = Enumerable.Range(0, messageSize).Select(j => (byte)((i + j) % 256)).ToArray(); + p.TryEnqueue(message).Should().BeTrue($"Failed to enqueue message {i}"); + } + + for (int i = 0; i < messageCount; i++) + { + var received = s.Dequeue(default); + received.Length.Should().Be(messageSize); + + var expected = Enumerable.Range(0, messageSize).Select(j => (byte)((i + j) % 256)).ToArray(); + received.ToArray().Should().BeEquivalentTo(expected, options => options.WithStrictOrdering()); + } + } + + [Fact] + [TestBeforeAfter] + public async Task MultipleConcurrentPublishers_NoDataCorruptionAsync() + { + // Test multiple publishers writing simultaneously + const int publisherCount = 10; + const int messagesPerPublisher = 50; + const int totalMessages = publisherCount * messagesPerPublisher; + using var p = CreatePublisher(10 * 1024 * 1024); + using var s = CreateSubscriber(10 * 1024 * 1024); + + var tasks = new Task[publisherCount]; + using var barrier = new Barrier(publisherCount); + + for (int publisherId = 0; publisherId < publisherCount; publisherId++) + { + int id = publisherId; + tasks[id] = Task.Run(async () => + { + barrier.SignalAndWait(); // Ensure all start simultaneously + + for (int i = 0; i < messagesPerPublisher; i++) + { + // Each publisher sends unique values: publisherId * 1000 + messageIndex + var value = (id * 1000) + i; + var message = BitConverter.GetBytes(value); + + bool enqueued = false; + while (!enqueued) + { + enqueued = p.TryEnqueue(message); + if (!enqueued) + await Task.Delay(1); + } + } + }); + } + + await Task.WhenAll(tasks); + + // Verify all messages received and no corruption + var receivedValues = new HashSet(); + for (int i = 0; i < totalMessages; i++) + { + var received = s.Dequeue(default); + received.Length.Should().Be(sizeof(int)); + + var value = BitConverter.ToInt32(received.Span); + receivedValues.Add(value).Should().BeTrue($"Duplicate value {value} received"); + } + + receivedValues.Count.Should().Be(totalMessages); + } + + [Fact] + [TestBeforeAfter] + public void MultipleSubscribersSeeSameData() + { + // Test that multiple subscribers share the same ReadOffset and consume messages sequentially + // Validates that ReadOffset is properly synchronized across subscriber instances + const int capacity = 10 * 1024 * 1024; + + using var p = CreatePublisher(capacity); + using var s1 = CreateSubscriber(capacity); + using var s2 = CreateSubscriber(capacity); + + // Enqueue two distinct messages + byte[] data1 = [10, 20, 30, 40]; + byte[] data2 = [50, 60, 70, 80]; + + p.TryEnqueue(data1).Should().BeTrue("Failed to enqueue first message"); + p.TryEnqueue(data2).Should().BeTrue("Failed to enqueue second message"); + + // Subscriber1 reads first message + s1.TryDequeue(default, out var message1).Should().BeTrue( + "Subscriber1 should be able to read first message"); + message1.Length.Should().Be(4); + message1.ToArray().Should().BeEquivalentTo(data1, options => options.WithStrictOrdering()); + + // Subscriber2 reads next message (should get data2, not data1, because ReadOffset moved) + s2.TryDequeue(default, out var message2).Should().BeTrue( + "Subscriber2 should be able to read next message"); + message2.Length.Should().Be(4); + message2.ToArray().Should().BeEquivalentTo( + data2, + options => options.WithStrictOrdering(), + "Subscriber2 should get second message since ReadOffset is shared"); + + // Verify queue is now empty + s1.TryDequeue(default, out _).Should().BeFalse("Queue should be empty after both messages consumed"); + s2.TryDequeue(default, out _).Should().BeFalse("Queue should be empty after both messages consumed"); + } + + [Fact] + [TestBeforeAfter] + public void PublisherAndSubscriber_ShareSameQueueHeader() + { + // Critical: Verify both see same ReadOffset/WriteOffset for proper synchronization + using var p = CreatePublisher(1024 * 1024); + using var s = CreateSubscriber(1024 * 1024); + + // Initially both should see zero offsets + var message = new byte[] { 1, 2, 3, 4 }; + p.TryEnqueue(message).Should().BeTrue(); + + // After enqueue, subscriber should see the write + var received = s.Dequeue(default); + received.ToArray().Should().BeEquivalentTo(message); + + // Enqueue multiple messages + for (int i = 0; i < 10; i++) + { + var msg = BitConverter.GetBytes(i); + p.TryEnqueue(msg).Should().BeTrue(); + } + + // Dequeue half of them + for (int i = 0; i < 5; i++) + s.Dequeue(default); + + // Enqueue more - should use freed space + for (int i = 0; i < 5; i++) + { + var msg = BitConverter.GetBytes(i + 100); + p.TryEnqueue(msg).Should().BeTrue(); + } + + // Verify remaining messages are correct + for (int i = 5; i < 10; i++) + { + var received2 = s.Dequeue(default); + BitConverter.ToInt32(received2.Span).Should().Be(i); + } + + for (int i = 0; i < 5; i++) + { + var received3 = s.Dequeue(default); + BitConverter.ToInt32(received3.Span).Should().Be(i + 100); + } + } + + [Fact] + [TestBeforeAfter] + public void Queue_RejectsMessageWhenFull() + { + // Test capacity enforcement + const int capacity = 1024; + const int messageSize = 100; + + using var p = CreatePublisher(capacity); + using var s = CreateSubscriber(capacity); + + var message = new byte[messageSize]; + int messagesEnqueued = 0; + + // Fill the queue + while (p.TryEnqueue(message)) + { + messagesEnqueued++; + if (messagesEnqueued > 100) // Safety limit + break; + } + + messagesEnqueued.Should().BeGreaterThan(0, "Should have enqueued at least one message"); + + // Try to enqueue one more - should fail + p.TryEnqueue(message).Should().BeFalse("Queue should be full"); + + // Dequeue one message + _ = s.Dequeue(default); + + // Now should be able to enqueue again + p.TryEnqueue(message).Should().BeTrue("Space should be available after dequeue"); + } + + [Fact] + [TestBeforeAfter] + public void Queue_CapacityFreedAfterDequeue() + { + // Verify space is properly reclaimed + const int capacity = 2048; + const int messageSize = 200; + + using var p = CreatePublisher(capacity); + using var s = CreateSubscriber(capacity); + + var message = new byte[messageSize]; + + // Fill queue + int firstBatch = 0; + while (p.TryEnqueue(message)) + { + firstBatch++; + if (firstBatch > 50) // Safety + break; + } + + // Should be full now + p.TryEnqueue(message).Should().BeFalse(); + + // Dequeue half + for (int i = 0; i < firstBatch / 2; i++) + _ = s.Dequeue(default); + + // Should be able to enqueue more now + int secondBatch = 0; + while (p.TryEnqueue(message)) + { + secondBatch++; + if (secondBatch > 50) // Safety + break; + } + + secondBatch.Should().BeGreaterThan(0, "Should reclaim space after dequeue"); + } + + [Fact(Timeout = 10_000)] + [TestBeforeAfter] + public async Task CrossInstanceQueueStateConsistencyAsync() + { + // Test that multiple publisher and subscriber instances can share queue state while active + // Verifies that ReadOffset/WriteOffset are correctly synchronized across concurrent instances + // Note: Unlike C++, C# implementation requires at least one publisher to remain alive + // to maintain queue state - this is a known implementation characteristic + const int messagesPerPublisher = 5; + const int publisherCount = 3; + const int totalMessages = messagesPerPublisher * publisherCount; + const int capacity = 10 * 1024 * 1024; + + // Drain any leftover messages + using (var drainSub = CreateSubscriber(capacity)) + { + while (drainSub.TryDequeue(default, out _)) + { + // Keep draining + } + } + + // Keep all publishers alive while writing + var publishers = new List(); + var allSentValues = new List(); + + try + { + // Phase 1: Create multiple publishers and have them all write + for (int pubIndex = 0; pubIndex < publisherCount; pubIndex++) + { + var publisher = CreatePublisher(capacity); + publishers.Add(publisher); + + for (int msgIndex = 0; msgIndex < messagesPerPublisher; msgIndex++) + { + byte value = (byte)(100 + (pubIndex * 10) + msgIndex); + allSentValues.Add(value); + + byte[] message = [value]; + publisher.TryEnqueue(message).Should().BeTrue( + $"Publisher {pubIndex}, message {msgIndex} (value {value}) failed"); + } + } + + // Phase 2: Read with multiple subscribers while publishers are still alive + var allReceivedValues = new List(); + + for (int subIndex = 0; subIndex < publisherCount; subIndex++) + { + using var subscriber = CreateSubscriber(capacity); + + for (int msgIndex = 0; msgIndex < messagesPerPublisher; msgIndex++) + { + subscriber.TryDequeue(default, out var received).Should().BeTrue( + $"Subscriber {subIndex}, message {msgIndex} failed"); + + received.Length.Should().Be( + 1, + $"Subscriber {subIndex}, message {msgIndex} wrong length"); + + allReceivedValues.Add(received.Span[0]); + } + } + + // Phase 3: Verify all messages received in correct order + allReceivedValues.Count.Should().Be(totalMessages); + + for (int i = 0; i < totalMessages; i++) + { + allReceivedValues[i].Should().Be( + allSentValues[i], + $"Position {i}: Expected {allSentValues[i]}, Got {allReceivedValues[i]}"); + } + + // Phase 4: Verify queue is empty + using var finalSubscriber = CreateSubscriber(capacity); + finalSubscriber.TryDequeue(default, out _).Should().BeFalse("Queue should be empty"); + } + finally + { + // Clean up all publishers + foreach (var pub in publishers) + pub.Dispose(); + } + + // This is so we can use the Timeout attribute - this test can block forever if the implementation breaks + await Task.CompletedTask; + } + + [Theory] + [InlineData(15)] // Not multiple of 8 + [InlineData(7)] // Too small + [InlineData(0)] // Zero + [InlineData(-1)] // Negative + [TestBeforeAfter] + public void QueueOptions_RejectsInvalidCapacity(long capacity) + { + // Should throw for invalid capacities + var action = () => new QueueOptions("test", fixture.Path, capacity); + action.Should().Throw(); + } + + [Fact] + [TestBeforeAfter] + public void QueueOptions_RequiresQueueName() + { + // Should throw for null/empty names + var action1 = () => new QueueOptions(null!, fixture.Path, 1024); + action1.Should().Throw(); + + var action2 = () => new QueueOptions(string.Empty, fixture.Path, 1024); + action2.Should().Throw(); + + // Note: Whitespace-only strings are technically valid queue names + // The underlying CheckNonEmpty only validates null and empty strings + // A whitespace queue name would create a valid (though odd) memory-mapped file + } + + [Fact] + [TestBeforeAfter] + public void MessageHeader_CorrectSize() + { + // Verify MessageHeader size is 8 bytes for C#/C++ interop compatibility + unsafe + { + sizeof(MessageHeader).Should().Be(8, "MessageHeader must be 8 bytes for C++ interop"); + } + } + + [Fact] + [TestBeforeAfter] + public void QueueHeader_CorrectSize() + { + // Verify QueueHeader size for C#/C++ interop compatibility + unsafe + { + sizeof(QueueHeader).Should().Be(32, "QueueHeader must be 32 bytes for C++ interop"); + } + } + private IPublisher CreatePublisher(long capacity) => queueFactory.CreatePublisher(new("qn", fixture.Path, capacity)); diff --git a/src/Interprocess.Tests/SemaphoreTests.cs b/src/Interprocess.Tests/SemaphoreTests.cs index 327da9c..22499a3 100644 --- a/src/Interprocess.Tests/SemaphoreTests.cs +++ b/src/Interprocess.Tests/SemaphoreTests.cs @@ -1,5 +1,6 @@ using Cloudtoid.Interprocess.Semaphore.Linux; using Cloudtoid.Interprocess.Semaphore.MacOS; +using Cloudtoid.Interprocess.Semaphore.Windows; namespace Cloudtoid.Interprocess.Tests; @@ -118,4 +119,195 @@ public void CanReuseSameSemaphoreNameMacOS() sem.Release(); } } + + [Fact(Platforms = Platform.Windows)] + [TestBeforeAfter] + public async Task Semaphore_MultipleWaiters_AllReleasedWindowsAsync() + { + // Test multiple threads waiting, then all get released + const int waiterCount = 10; + using var sem = new SemaphoreWindows("multi-wait-test"); + + var tasks = new Task[waiterCount]; + using var startBarrier = new Barrier(waiterCount + 1); + + for (int i = 0; i < waiterCount; i++) + { + tasks[i] = Task.Run(() => + { + startBarrier.SignalAndWait(); // Wait for all tasks to be ready + return sem.Wait(5000); // 5 second timeout + }); + } + + startBarrier.SignalAndWait(); // Release all tasks to start waiting + + // Give them a moment to start waiting + await Task.Delay(100); + + // Release all waiters + for (int i = 0; i < waiterCount; i++) + sem.Release(); + + var results = await Task.WhenAll(tasks); + + // All should have succeeded + results.Should().AllSatisfy(r => r.Should().BeTrue()); + } + + [Fact(Platforms = Platform.Linux | Platform.FreeBSD)] + [TestBeforeAfter] + public async Task Semaphore_MultipleWaiters_AllReleasedLinuxAsync() + { + const int waiterCount = 10; + using var sem = new SemaphoreLinux("multi-wait-test-linux", deleteOnDispose: true); + + var tasks = new Task[waiterCount]; + using var startBarrier = new Barrier(waiterCount + 1); + + for (int i = 0; i < waiterCount; i++) + { + tasks[i] = Task.Run(() => + { + startBarrier.SignalAndWait(); + return sem.Wait(5000); + }); + } + + startBarrier.SignalAndWait(); + await Task.Delay(100); + + for (int i = 0; i < waiterCount; i++) + sem.Release(); + + var results = await Task.WhenAll(tasks); + results.Should().AllSatisfy(r => r.Should().BeTrue()); + } + + [Fact(Platforms = Platform.OSX)] + [TestBeforeAfter] + public async Task Semaphore_MultipleWaiters_AllReleasedMacOSAsync() + { + const int waiterCount = 10; + using var sem = new SemaphoreMacOS("multi-wait-test-macos", deleteOnDispose: true); + + var tasks = new Task[waiterCount]; + using var startBarrier = new Barrier(waiterCount + 1); + + for (int i = 0; i < waiterCount; i++) + { + tasks[i] = Task.Run(() => + { + startBarrier.SignalAndWait(); + return sem.Wait(5000); + }); + } + + startBarrier.SignalAndWait(); + await Task.Delay(100); + + for (int i = 0; i < waiterCount; i++) + sem.Release(); + + var results = await Task.WhenAll(tasks); + results.Should().AllSatisfy(r => r.Should().BeTrue()); + } + + [Fact(Platforms = Platform.Windows)] + [TestBeforeAfter] + public void Semaphore_TimeoutBehaviorWindows() + { + // Test wait timeout scenarios + using var sem = new SemaphoreWindows("timeout-test"); + + // Wait with immediate timeout (0) - should fail immediately + sem.Wait(0).Should().BeFalse(); + + // Wait with short timeout (100ms) - should fail after timeout + var sw = System.Diagnostics.Stopwatch.StartNew(); + sem.Wait(100).Should().BeFalse(); + sw.Stop(); + sw.ElapsedMilliseconds.Should().BeGreaterOrEqualTo(90); // Allow some tolerance + + // Release and wait with timeout - should succeed immediately + sem.Release(); + sw.Restart(); + sem.Wait(1000).Should().BeTrue(); + sw.Stop(); + sw.ElapsedMilliseconds.Should().BeLessThan(100); // Should be fast + } + + [Fact(Platforms = Platform.Linux | Platform.FreeBSD)] + [TestBeforeAfter] + public void Semaphore_TimeoutBehaviorLinux() + { + using var sem = new SemaphoreLinux("timeout-test-linux", deleteOnDispose: true); + + sem.Wait(0).Should().BeFalse(); + + var sw = System.Diagnostics.Stopwatch.StartNew(); + sem.Wait(100).Should().BeFalse(); + sw.Stop(); + sw.ElapsedMilliseconds.Should().BeGreaterOrEqualTo(90); + + sem.Release(); + sw.Restart(); + sem.Wait(1000).Should().BeTrue(); + sw.Stop(); + sw.ElapsedMilliseconds.Should().BeLessThan(100); + } + + [Fact(Platforms = Platform.OSX)] + [TestBeforeAfter] + public void Semaphore_TimeoutBehaviorMacOS() + { + using var sem = new SemaphoreMacOS("timeout-test-macos", deleteOnDispose: true); + + sem.Wait(0).Should().BeFalse(); + + var sw = System.Diagnostics.Stopwatch.StartNew(); + sem.Wait(100).Should().BeFalse(); + sw.Stop(); + sw.ElapsedMilliseconds.Should().BeGreaterOrEqualTo(90); + + sem.Release(); + sw.Restart(); + sem.Wait(1000).Should().BeTrue(); + sw.Stop(); + sw.ElapsedMilliseconds.Should().BeLessThan(100); + } + + [Fact(Platforms = Platform.Windows)] + [TestBeforeAfter] + public async Task Semaphore_StressTest_ManyReleaseAndWaitAsync() + { + // Stress test with many rapid release/wait operations + const int iterations = 1000; + using var sem = new SemaphoreWindows("stress-test"); + + var producer = Task.Run(async () => + { + for (int i = 0; i < iterations; i++) + { + sem.Release(); + if (i % 10 == 0) + await Task.Delay(1); + } + }); + + var consumer = Task.Run(() => + { + int consumed = 0; + while (consumed < iterations) + { + if (sem.Wait(100)) + consumed++; + } + return consumed; + }); + + await Task.WhenAll(producer, consumer); + var result = await consumer; + result.Should().Be(iterations); + } } \ No newline at end of file diff --git a/src/Interprocess.sln b/src/Interprocess.sln index dcfe9bb..fe8438c 100644 --- a/src/Interprocess.sln +++ b/src/Interprocess.sln @@ -1,7 +1,7 @@  Microsoft Visual Studio Solution File, Format Version 12.00 -# Visual Studio Version 16 -VisualStudioVersion = 16.0.30320.27 +# Visual Studio Version 17 +VisualStudioVersion = 17.14.36603.0 MinimumVisualStudioVersion = 10.0.40219.1 Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Interprocess", "Interprocess\Interprocess.csproj", "{25AEAFB3-E9CD-4B51-B3B3-E706BAC1FA23}" EndProject @@ -11,9 +11,9 @@ Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Interprocess.Benchmark", "I EndProject Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Sample", "Sample", "{B95A6F0E-6A43-4F1B-8698-125BFD44716F}" EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Publisher", "Sample\Publisher\Publisher.csproj", "{FEB59965-4F4B-4DFA-B3A7-59A990DE0143}" +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Publisher", "Sample\csharp\Publisher\Publisher.csproj", "{FEB59965-4F4B-4DFA-B3A7-59A990DE0143}" EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Subscriber", "Sample\Subscriber\Subscriber.csproj", "{D19F0C91-DEEE-4BC1-B5FC-0F5ACCE09E76}" +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Subscriber", "Sample\csharp\Subscriber\Subscriber.csproj", "{D19F0C91-DEEE-4BC1-B5FC-0F5ACCE09E76}" EndProject Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Build", "Build", "{A178B233-238B-4BCB-8164-EA649C8FBB8D}" ProjectSection(SolutionItems) = preProject @@ -23,39 +23,192 @@ Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Build", "Build", "{A178B233 ..\.github\workflows\publish.yml = ..\.github\workflows\publish.yml EndProjectSection EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "Interprocess.Native.Static", "Interprocess.Native.Static\Interprocess.Native.Static.vcxproj", "{A31EA597-838E-4445-953A-4CBDB01519F2}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "Interprocess.Native.Static.Tests", "Interprocess.Native.Static.Tests\Interprocess.Native.Static.Tests.vcxproj", "{3A5B0009-EAAE-4C4B-BA73-30E58D3DBC87}" +EndProject +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "csharp", "csharp", "{02EA681E-C7D8-13C7-8484-4AC65E1B71E8}" +EndProject +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "cpp", "cpp", "{45B416DB-3784-424C-9D61-299C78C77FDE}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "Producer", "Sample\cpp\Producer\Producer.vcxproj", "{D32F5CFD-AC27-4956-A9DB-B36919C9D12B}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "Consumer", "Sample\cpp\Consumer\Consumer.vcxproj", "{891953FC-A67C-414B-98D5-039E437BEB6B}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "RangeProducer", "Sample\cpp\RangeProducer\RangeProducer.vcxproj", "{8A9F5CD2-BC38-4A5D-9E1F-7C8B4D6E3A2F}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "SumConsumer", "Sample\cpp\SumConsumer\SumConsumer.vcxproj", "{7B8D3E4C-AF29-4C6F-8D2A-1E5F9B3C7D8A}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "SumConsumer", "Sample\csharp\SumConsumer\SumConsumer.csproj", "{517F4B7C-0634-4599-85CD-039964A668E3}" +EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Debug|Any CPU = Debug|Any CPU + Debug|x64 = Debug|x64 + Debug|x86 = Debug|x86 Release|Any CPU = Release|Any CPU + Release|x64 = Release|x64 + Release|x86 = Release|x86 EndGlobalSection GlobalSection(ProjectConfigurationPlatforms) = postSolution {25AEAFB3-E9CD-4B51-B3B3-E706BAC1FA23}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {25AEAFB3-E9CD-4B51-B3B3-E706BAC1FA23}.Debug|Any CPU.Build.0 = Debug|Any CPU + {25AEAFB3-E9CD-4B51-B3B3-E706BAC1FA23}.Debug|x64.ActiveCfg = Debug|Any CPU + {25AEAFB3-E9CD-4B51-B3B3-E706BAC1FA23}.Debug|x64.Build.0 = Debug|Any CPU + {25AEAFB3-E9CD-4B51-B3B3-E706BAC1FA23}.Debug|x86.ActiveCfg = Debug|Any CPU + {25AEAFB3-E9CD-4B51-B3B3-E706BAC1FA23}.Debug|x86.Build.0 = Debug|Any CPU {25AEAFB3-E9CD-4B51-B3B3-E706BAC1FA23}.Release|Any CPU.ActiveCfg = Release|Any CPU {25AEAFB3-E9CD-4B51-B3B3-E706BAC1FA23}.Release|Any CPU.Build.0 = Release|Any CPU + {25AEAFB3-E9CD-4B51-B3B3-E706BAC1FA23}.Release|x64.ActiveCfg = Release|Any CPU + {25AEAFB3-E9CD-4B51-B3B3-E706BAC1FA23}.Release|x64.Build.0 = Release|Any CPU + {25AEAFB3-E9CD-4B51-B3B3-E706BAC1FA23}.Release|x86.ActiveCfg = Release|Any CPU + {25AEAFB3-E9CD-4B51-B3B3-E706BAC1FA23}.Release|x86.Build.0 = Release|Any CPU {743B7E36-FCB8-427A-ADBA-9490C52BD4E0}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {743B7E36-FCB8-427A-ADBA-9490C52BD4E0}.Debug|Any CPU.Build.0 = Debug|Any CPU + {743B7E36-FCB8-427A-ADBA-9490C52BD4E0}.Debug|x64.ActiveCfg = Debug|Any CPU + {743B7E36-FCB8-427A-ADBA-9490C52BD4E0}.Debug|x64.Build.0 = Debug|Any CPU + {743B7E36-FCB8-427A-ADBA-9490C52BD4E0}.Debug|x86.ActiveCfg = Debug|Any CPU + {743B7E36-FCB8-427A-ADBA-9490C52BD4E0}.Debug|x86.Build.0 = Debug|Any CPU {743B7E36-FCB8-427A-ADBA-9490C52BD4E0}.Release|Any CPU.ActiveCfg = Release|Any CPU {743B7E36-FCB8-427A-ADBA-9490C52BD4E0}.Release|Any CPU.Build.0 = Release|Any CPU + {743B7E36-FCB8-427A-ADBA-9490C52BD4E0}.Release|x64.ActiveCfg = Release|Any CPU + {743B7E36-FCB8-427A-ADBA-9490C52BD4E0}.Release|x64.Build.0 = Release|Any CPU + {743B7E36-FCB8-427A-ADBA-9490C52BD4E0}.Release|x86.ActiveCfg = Release|Any CPU + {743B7E36-FCB8-427A-ADBA-9490C52BD4E0}.Release|x86.Build.0 = Release|Any CPU {EA162596-E4DD-46AB-BDEA-90B31B4ED425}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {EA162596-E4DD-46AB-BDEA-90B31B4ED425}.Debug|Any CPU.Build.0 = Debug|Any CPU + {EA162596-E4DD-46AB-BDEA-90B31B4ED425}.Debug|x64.ActiveCfg = Debug|Any CPU + {EA162596-E4DD-46AB-BDEA-90B31B4ED425}.Debug|x64.Build.0 = Debug|Any CPU + {EA162596-E4DD-46AB-BDEA-90B31B4ED425}.Debug|x86.ActiveCfg = Debug|Any CPU + {EA162596-E4DD-46AB-BDEA-90B31B4ED425}.Debug|x86.Build.0 = Debug|Any CPU {EA162596-E4DD-46AB-BDEA-90B31B4ED425}.Release|Any CPU.ActiveCfg = Release|Any CPU {EA162596-E4DD-46AB-BDEA-90B31B4ED425}.Release|Any CPU.Build.0 = Release|Any CPU + {EA162596-E4DD-46AB-BDEA-90B31B4ED425}.Release|x64.ActiveCfg = Release|Any CPU + {EA162596-E4DD-46AB-BDEA-90B31B4ED425}.Release|x64.Build.0 = Release|Any CPU + {EA162596-E4DD-46AB-BDEA-90B31B4ED425}.Release|x86.ActiveCfg = Release|Any CPU + {EA162596-E4DD-46AB-BDEA-90B31B4ED425}.Release|x86.Build.0 = Release|Any CPU {FEB59965-4F4B-4DFA-B3A7-59A990DE0143}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {FEB59965-4F4B-4DFA-B3A7-59A990DE0143}.Debug|Any CPU.Build.0 = Debug|Any CPU + {FEB59965-4F4B-4DFA-B3A7-59A990DE0143}.Debug|x64.ActiveCfg = Debug|Any CPU + {FEB59965-4F4B-4DFA-B3A7-59A990DE0143}.Debug|x64.Build.0 = Debug|Any CPU + {FEB59965-4F4B-4DFA-B3A7-59A990DE0143}.Debug|x86.ActiveCfg = Debug|Any CPU + {FEB59965-4F4B-4DFA-B3A7-59A990DE0143}.Debug|x86.Build.0 = Debug|Any CPU {FEB59965-4F4B-4DFA-B3A7-59A990DE0143}.Release|Any CPU.ActiveCfg = Release|Any CPU {FEB59965-4F4B-4DFA-B3A7-59A990DE0143}.Release|Any CPU.Build.0 = Release|Any CPU + {FEB59965-4F4B-4DFA-B3A7-59A990DE0143}.Release|x64.ActiveCfg = Release|Any CPU + {FEB59965-4F4B-4DFA-B3A7-59A990DE0143}.Release|x64.Build.0 = Release|Any CPU + {FEB59965-4F4B-4DFA-B3A7-59A990DE0143}.Release|x86.ActiveCfg = Release|Any CPU + {FEB59965-4F4B-4DFA-B3A7-59A990DE0143}.Release|x86.Build.0 = Release|Any CPU {D19F0C91-DEEE-4BC1-B5FC-0F5ACCE09E76}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {D19F0C91-DEEE-4BC1-B5FC-0F5ACCE09E76}.Debug|Any CPU.Build.0 = Debug|Any CPU + {D19F0C91-DEEE-4BC1-B5FC-0F5ACCE09E76}.Debug|x64.ActiveCfg = Debug|Any CPU + {D19F0C91-DEEE-4BC1-B5FC-0F5ACCE09E76}.Debug|x64.Build.0 = Debug|Any CPU + {D19F0C91-DEEE-4BC1-B5FC-0F5ACCE09E76}.Debug|x86.ActiveCfg = Debug|Any CPU + {D19F0C91-DEEE-4BC1-B5FC-0F5ACCE09E76}.Debug|x86.Build.0 = Debug|Any CPU {D19F0C91-DEEE-4BC1-B5FC-0F5ACCE09E76}.Release|Any CPU.ActiveCfg = Release|Any CPU {D19F0C91-DEEE-4BC1-B5FC-0F5ACCE09E76}.Release|Any CPU.Build.0 = Release|Any CPU + {D19F0C91-DEEE-4BC1-B5FC-0F5ACCE09E76}.Release|x64.ActiveCfg = Release|Any CPU + {D19F0C91-DEEE-4BC1-B5FC-0F5ACCE09E76}.Release|x64.Build.0 = Release|Any CPU + {D19F0C91-DEEE-4BC1-B5FC-0F5ACCE09E76}.Release|x86.ActiveCfg = Release|Any CPU + {D19F0C91-DEEE-4BC1-B5FC-0F5ACCE09E76}.Release|x86.Build.0 = Release|Any CPU + {A31EA597-838E-4445-953A-4CBDB01519F2}.Debug|Any CPU.ActiveCfg = Debug|x64 + {A31EA597-838E-4445-953A-4CBDB01519F2}.Debug|Any CPU.Build.0 = Debug|x64 + {A31EA597-838E-4445-953A-4CBDB01519F2}.Debug|x64.ActiveCfg = Debug|x64 + {A31EA597-838E-4445-953A-4CBDB01519F2}.Debug|x64.Build.0 = Debug|x64 + {A31EA597-838E-4445-953A-4CBDB01519F2}.Debug|x86.ActiveCfg = Debug|Win32 + {A31EA597-838E-4445-953A-4CBDB01519F2}.Debug|x86.Build.0 = Debug|Win32 + {A31EA597-838E-4445-953A-4CBDB01519F2}.Release|Any CPU.ActiveCfg = Release|x64 + {A31EA597-838E-4445-953A-4CBDB01519F2}.Release|Any CPU.Build.0 = Release|x64 + {A31EA597-838E-4445-953A-4CBDB01519F2}.Release|x64.ActiveCfg = Release|x64 + {A31EA597-838E-4445-953A-4CBDB01519F2}.Release|x64.Build.0 = Release|x64 + {A31EA597-838E-4445-953A-4CBDB01519F2}.Release|x86.ActiveCfg = Release|Win32 + {A31EA597-838E-4445-953A-4CBDB01519F2}.Release|x86.Build.0 = Release|Win32 + {3A5B0009-EAAE-4C4B-BA73-30E58D3DBC87}.Debug|Any CPU.ActiveCfg = Debug|x64 + {3A5B0009-EAAE-4C4B-BA73-30E58D3DBC87}.Debug|Any CPU.Build.0 = Debug|x64 + {3A5B0009-EAAE-4C4B-BA73-30E58D3DBC87}.Debug|x64.ActiveCfg = Debug|x64 + {3A5B0009-EAAE-4C4B-BA73-30E58D3DBC87}.Debug|x64.Build.0 = Debug|x64 + {3A5B0009-EAAE-4C4B-BA73-30E58D3DBC87}.Debug|x86.ActiveCfg = Debug|Win32 + {3A5B0009-EAAE-4C4B-BA73-30E58D3DBC87}.Debug|x86.Build.0 = Debug|Win32 + {3A5B0009-EAAE-4C4B-BA73-30E58D3DBC87}.Release|Any CPU.ActiveCfg = Release|x64 + {3A5B0009-EAAE-4C4B-BA73-30E58D3DBC87}.Release|Any CPU.Build.0 = Release|x64 + {3A5B0009-EAAE-4C4B-BA73-30E58D3DBC87}.Release|x64.ActiveCfg = Release|x64 + {3A5B0009-EAAE-4C4B-BA73-30E58D3DBC87}.Release|x64.Build.0 = Release|x64 + {3A5B0009-EAAE-4C4B-BA73-30E58D3DBC87}.Release|x86.ActiveCfg = Release|Win32 + {3A5B0009-EAAE-4C4B-BA73-30E58D3DBC87}.Release|x86.Build.0 = Release|Win32 + {D32F5CFD-AC27-4956-A9DB-B36919C9D12B}.Debug|Any CPU.ActiveCfg = Debug|x64 + {D32F5CFD-AC27-4956-A9DB-B36919C9D12B}.Debug|Any CPU.Build.0 = Debug|x64 + {D32F5CFD-AC27-4956-A9DB-B36919C9D12B}.Debug|x64.ActiveCfg = Debug|x64 + {D32F5CFD-AC27-4956-A9DB-B36919C9D12B}.Debug|x64.Build.0 = Debug|x64 + {D32F5CFD-AC27-4956-A9DB-B36919C9D12B}.Debug|x86.ActiveCfg = Debug|Win32 + {D32F5CFD-AC27-4956-A9DB-B36919C9D12B}.Debug|x86.Build.0 = Debug|Win32 + {D32F5CFD-AC27-4956-A9DB-B36919C9D12B}.Release|Any CPU.ActiveCfg = Release|x64 + {D32F5CFD-AC27-4956-A9DB-B36919C9D12B}.Release|Any CPU.Build.0 = Release|x64 + {D32F5CFD-AC27-4956-A9DB-B36919C9D12B}.Release|x64.ActiveCfg = Release|x64 + {D32F5CFD-AC27-4956-A9DB-B36919C9D12B}.Release|x64.Build.0 = Release|x64 + {D32F5CFD-AC27-4956-A9DB-B36919C9D12B}.Release|x86.ActiveCfg = Release|Win32 + {D32F5CFD-AC27-4956-A9DB-B36919C9D12B}.Release|x86.Build.0 = Release|Win32 + {891953FC-A67C-414B-98D5-039E437BEB6B}.Debug|Any CPU.ActiveCfg = Debug|x64 + {891953FC-A67C-414B-98D5-039E437BEB6B}.Debug|Any CPU.Build.0 = Debug|x64 + {891953FC-A67C-414B-98D5-039E437BEB6B}.Debug|x64.ActiveCfg = Debug|x64 + {891953FC-A67C-414B-98D5-039E437BEB6B}.Debug|x64.Build.0 = Debug|x64 + {891953FC-A67C-414B-98D5-039E437BEB6B}.Debug|x86.ActiveCfg = Debug|Win32 + {891953FC-A67C-414B-98D5-039E437BEB6B}.Debug|x86.Build.0 = Debug|Win32 + {891953FC-A67C-414B-98D5-039E437BEB6B}.Release|Any CPU.ActiveCfg = Release|x64 + {891953FC-A67C-414B-98D5-039E437BEB6B}.Release|Any CPU.Build.0 = Release|x64 + {891953FC-A67C-414B-98D5-039E437BEB6B}.Release|x64.ActiveCfg = Release|x64 + {891953FC-A67C-414B-98D5-039E437BEB6B}.Release|x64.Build.0 = Release|x64 + {891953FC-A67C-414B-98D5-039E437BEB6B}.Release|x86.ActiveCfg = Release|Win32 + {891953FC-A67C-414B-98D5-039E437BEB6B}.Release|x86.Build.0 = Release|Win32 + {8A9F5CD2-BC38-4A5D-9E1F-7C8B4D6E3A2F}.Debug|Any CPU.ActiveCfg = Debug|x64 + {8A9F5CD2-BC38-4A5D-9E1F-7C8B4D6E3A2F}.Debug|Any CPU.Build.0 = Debug|x64 + {8A9F5CD2-BC38-4A5D-9E1F-7C8B4D6E3A2F}.Debug|x64.ActiveCfg = Debug|x64 + {8A9F5CD2-BC38-4A5D-9E1F-7C8B4D6E3A2F}.Debug|x64.Build.0 = Debug|x64 + {8A9F5CD2-BC38-4A5D-9E1F-7C8B4D6E3A2F}.Debug|x86.ActiveCfg = Debug|Win32 + {8A9F5CD2-BC38-4A5D-9E1F-7C8B4D6E3A2F}.Debug|x86.Build.0 = Debug|Win32 + {8A9F5CD2-BC38-4A5D-9E1F-7C8B4D6E3A2F}.Release|Any CPU.ActiveCfg = Release|x64 + {8A9F5CD2-BC38-4A5D-9E1F-7C8B4D6E3A2F}.Release|Any CPU.Build.0 = Release|x64 + {8A9F5CD2-BC38-4A5D-9E1F-7C8B4D6E3A2F}.Release|x64.ActiveCfg = Release|x64 + {8A9F5CD2-BC38-4A5D-9E1F-7C8B4D6E3A2F}.Release|x64.Build.0 = Release|x64 + {8A9F5CD2-BC38-4A5D-9E1F-7C8B4D6E3A2F}.Release|x86.ActiveCfg = Release|Win32 + {8A9F5CD2-BC38-4A5D-9E1F-7C8B4D6E3A2F}.Release|x86.Build.0 = Release|Win32 + {7B8D3E4C-AF29-4C6F-8D2A-1E5F9B3C7D8A}.Debug|Any CPU.ActiveCfg = Debug|x64 + {7B8D3E4C-AF29-4C6F-8D2A-1E5F9B3C7D8A}.Debug|Any CPU.Build.0 = Debug|x64 + {7B8D3E4C-AF29-4C6F-8D2A-1E5F9B3C7D8A}.Debug|x64.ActiveCfg = Debug|x64 + {7B8D3E4C-AF29-4C6F-8D2A-1E5F9B3C7D8A}.Debug|x64.Build.0 = Debug|x64 + {7B8D3E4C-AF29-4C6F-8D2A-1E5F9B3C7D8A}.Debug|x86.ActiveCfg = Debug|Win32 + {7B8D3E4C-AF29-4C6F-8D2A-1E5F9B3C7D8A}.Debug|x86.Build.0 = Debug|Win32 + {7B8D3E4C-AF29-4C6F-8D2A-1E5F9B3C7D8A}.Release|Any CPU.ActiveCfg = Release|x64 + {7B8D3E4C-AF29-4C6F-8D2A-1E5F9B3C7D8A}.Release|Any CPU.Build.0 = Release|x64 + {7B8D3E4C-AF29-4C6F-8D2A-1E5F9B3C7D8A}.Release|x64.ActiveCfg = Release|x64 + {7B8D3E4C-AF29-4C6F-8D2A-1E5F9B3C7D8A}.Release|x64.Build.0 = Release|x64 + {7B8D3E4C-AF29-4C6F-8D2A-1E5F9B3C7D8A}.Release|x86.ActiveCfg = Release|Win32 + {7B8D3E4C-AF29-4C6F-8D2A-1E5F9B3C7D8A}.Release|x86.Build.0 = Release|Win32 + {517F4B7C-0634-4599-85CD-039964A668E3}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {517F4B7C-0634-4599-85CD-039964A668E3}.Debug|Any CPU.Build.0 = Debug|Any CPU + {517F4B7C-0634-4599-85CD-039964A668E3}.Debug|x64.ActiveCfg = Debug|Any CPU + {517F4B7C-0634-4599-85CD-039964A668E3}.Debug|x64.Build.0 = Debug|Any CPU + {517F4B7C-0634-4599-85CD-039964A668E3}.Debug|x86.ActiveCfg = Debug|Any CPU + {517F4B7C-0634-4599-85CD-039964A668E3}.Debug|x86.Build.0 = Debug|Any CPU + {517F4B7C-0634-4599-85CD-039964A668E3}.Release|Any CPU.ActiveCfg = Release|Any CPU + {517F4B7C-0634-4599-85CD-039964A668E3}.Release|Any CPU.Build.0 = Release|Any CPU + {517F4B7C-0634-4599-85CD-039964A668E3}.Release|x64.ActiveCfg = Release|Any CPU + {517F4B7C-0634-4599-85CD-039964A668E3}.Release|x64.Build.0 = Release|Any CPU + {517F4B7C-0634-4599-85CD-039964A668E3}.Release|x86.ActiveCfg = Release|Any CPU + {517F4B7C-0634-4599-85CD-039964A668E3}.Release|x86.Build.0 = Release|Any CPU EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE EndGlobalSection GlobalSection(NestedProjects) = preSolution - {FEB59965-4F4B-4DFA-B3A7-59A990DE0143} = {B95A6F0E-6A43-4F1B-8698-125BFD44716F} - {D19F0C91-DEEE-4BC1-B5FC-0F5ACCE09E76} = {B95A6F0E-6A43-4F1B-8698-125BFD44716F} + {FEB59965-4F4B-4DFA-B3A7-59A990DE0143} = {02EA681E-C7D8-13C7-8484-4AC65E1B71E8} + {D19F0C91-DEEE-4BC1-B5FC-0F5ACCE09E76} = {02EA681E-C7D8-13C7-8484-4AC65E1B71E8} + {02EA681E-C7D8-13C7-8484-4AC65E1B71E8} = {B95A6F0E-6A43-4F1B-8698-125BFD44716F} + {45B416DB-3784-424C-9D61-299C78C77FDE} = {B95A6F0E-6A43-4F1B-8698-125BFD44716F} + {D32F5CFD-AC27-4956-A9DB-B36919C9D12B} = {45B416DB-3784-424C-9D61-299C78C77FDE} + {891953FC-A67C-414B-98D5-039E437BEB6B} = {45B416DB-3784-424C-9D61-299C78C77FDE} + {8A9F5CD2-BC38-4A5D-9E1F-7C8B4D6E3A2F} = {45B416DB-3784-424C-9D61-299C78C77FDE} + {7B8D3E4C-AF29-4C6F-8D2A-1E5F9B3C7D8A} = {45B416DB-3784-424C-9D61-299C78C77FDE} + {517F4B7C-0634-4599-85CD-039964A668E3} = {02EA681E-C7D8-13C7-8484-4AC65E1B71E8} EndGlobalSection GlobalSection(ExtensibilityGlobals) = postSolution SolutionGuid = {144D02E1-C849-4099-B7D4-478B4095101A} diff --git a/src/Sample/Publisher/Program.cs b/src/Sample/Publisher/Program.cs deleted file mode 100644 index 9e4cd7a..0000000 --- a/src/Sample/Publisher/Program.cs +++ /dev/null @@ -1,41 +0,0 @@ -using Cloudtoid.Interprocess; - -namespace Publisher; - -internal static partial class Program -{ - internal static async Task Main() - { - // Set up an optional logger factory to redirect the traces to he console - - using var loggerFactory = LoggerFactory.Create(builder => builder.AddConsole()); - var logger = loggerFactory.CreateLogger("Publisher"); - - // Create the queue factory. If you are not interested in tracing the internals of - // the queue then don't pass in a loggerFactory - - var factory = new QueueFactory(loggerFactory); - - // Create a message queue publisher - - var options = new QueueOptions( - queueName: "sample-queue", - capacity: 1024 * 1024); - - using var publisher = factory.CreatePublisher(options); - - // Enqueue messages - - int i = 0; - while (true) - { - if (publisher.TryEnqueue([(byte)(i % 256)])) - LogEnqueue(logger, i++); - else - await Task.Delay(100); - } - } - - [LoggerMessage(Level = LogLevel.Information, Message = "Enqueue #{i}")] - private static partial void LogEnqueue(ILogger logger, int i); -} \ No newline at end of file diff --git a/src/Sample/Subscriber/Program.cs b/src/Sample/Subscriber/Program.cs deleted file mode 100644 index 9ffaa90..0000000 --- a/src/Sample/Subscriber/Program.cs +++ /dev/null @@ -1,39 +0,0 @@ -using Cloudtoid.Interprocess; - -namespace Subscriber; - -internal static partial class Program -{ - internal static void Main() - { - // Set up an optional logger factory to redirect the traces to he console - - using var loggerFactory = LoggerFactory.Create(builder => builder.AddConsole()); - var logger = loggerFactory.CreateLogger("Subscriber"); - - // Create the queue factory. If you are not interested in tracing the internals of - // the queue then don't pass in a loggerFactory - - var factory = new QueueFactory(loggerFactory); - - // Create a message queue publisher - - var options = new QueueOptions( - queueName: "sample-queue", - capacity: 1024 * 1024); - - using var subscriber = factory.CreateSubscriber(options); - - // Dequeue messages - var messageBuffer = new byte[1]; - - while (true) - { - if (subscriber.TryDequeue(messageBuffer, default, out var message)) - LogDequeue(logger, messageBuffer[0]); - } - } - - [LoggerMessage(Level = LogLevel.Information, Message = "Dequeue #{i}")] - private static partial void LogDequeue(ILogger logger, int i); -} \ No newline at end of file diff --git a/src/Sample/cpp/Consumer/Consumer.cpp b/src/Sample/cpp/Consumer/Consumer.cpp new file mode 100644 index 0000000..6cc3d2d --- /dev/null +++ b/src/Sample/cpp/Consumer/Consumer.cpp @@ -0,0 +1,186 @@ +#include +#include +#include +#include +#include +#include +#include +#include "QueueOptions.h" +#include "QueueFactory.h" +#include "ISubscriber.h" + +using namespace Cloudtoid::Interprocess; + +int main(int argc, char* argv[]) +{ + try + { + // Parse command line arguments for expected message count and optional queue name + int expectedMessageCount = 100; // Default expectation + std::string queueName = "sample-queue"; // Default queue name + + if (argc > 1) + { + expectedMessageCount = std::atoi(argv[1]); + if (expectedMessageCount <= 0) + { + std::cerr << "Error: Expected message count must be a positive integer" << std::endl; + std::cerr << "Usage: " << argv[0] << " [expected_message_count] [queue_name]" << std::endl; + return 1; + } + } + + if (argc > 2) + { + queueName = argv[2]; + } + + std::cout << "C++ Consumer starting..." << std::endl; + std::cout << "Expected message count: " << expectedMessageCount << std::endl; + + // Mirror the C# subscriber configuration + const size_t capacity = 1024 * 1024; // 1MB like C# version + + // Convert string to wstring for QueueOptions + const std::wstring wQueueName(queueName.begin(), queueName.end()); + + // Create queue options - using 2-parameter constructor to match C# behavior exactly + QueueOptions options(wQueueName, capacity); + + // Create subscriber using the QueueFactory + QueueFactory factory; + std::unique_ptr subscriber(factory.CreateSubscriber(options)); + + std::cout << "Connected to queue: " << queueName << std::endl; + std::cout << "Capacity: " << capacity << " bytes" << std::endl; + std::cout << "Waiting for messages..." << std::endl; + std::cout << std::endl; + + // Buffer to receive data - single byte like C# version + std::vector buffer(1); + std::span bufferSpan(buffer); + std::span message; + + int messageCount = 0; + bool sequenceError = false; + std::vector receivedValues; // Track all received values for final validation + auto startTime = std::chrono::steady_clock::now(); + auto lastMessageTime = startTime; + + while (true) + { + // Try to dequeue a message + if (subscriber->TryDequeue(bufferSpan, message)) + { + if (!message.empty()) + { + unsigned char receivedByte = message[0]; + receivedValues.push_back(static_cast(receivedByte)); + + // Validate sequential value (0-99, repeating) + int expectedByte = messageCount % 100; + if (receivedByte != expectedByte) + { + if (!sequenceError) // Only log first sequence error + { + std::cout << "⚠ SEQUENCE ERROR at message " << (messageCount + 1) + << ": expected " << expectedByte << ", got " << static_cast(receivedByte) << std::endl; + sequenceError = true; + } + } + + messageCount++; + lastMessageTime = std::chrono::steady_clock::now(); + + // Show progress every 10 messages or at expected completion + if (messageCount % 10 == 0 || messageCount == expectedMessageCount) + { + auto elapsed = std::chrono::duration_cast(lastMessageTime - startTime).count(); + double throughput = elapsed > 0 ? (messageCount * 1000.0) / elapsed : 0.0; + + std::cout << "Received " << messageCount << "/" << expectedMessageCount + << " messages (Value: " << static_cast(receivedByte) + << ", Expected: " << expectedByte + << ", Throughput: " << throughput << " msg/s)" << std::endl; + } + + // Break if we've received all expected messages + if (messageCount >= expectedMessageCount) + { + std::cout << std::endl; + std::cout << "=== C++ Consumer Finished ===" << std::endl; + break; + } + } + } + else + { + // No message available, check timeout + auto currentTime = std::chrono::steady_clock::now(); + auto timeSinceLastMessage = std::chrono::duration_cast(currentTime - lastMessageTime).count(); + auto timeSinceStart = std::chrono::duration_cast(currentTime - startTime).count(); + + // If no messages for 3 seconds and we've received some, assume producer is done + // OR if no messages for 10 seconds total (in case producer never starts) + if ((messageCount > 0 && timeSinceLastMessage >= 3) || timeSinceStart >= 10) + { + std::cout << std::endl; + if (messageCount == 0) + { + std::cout << "=== C++ Consumer Timeout (No messages received) ===" << std::endl; + } + else + { + std::cout << "=== C++ Consumer Finished ===" << std::endl; + } + break; + } + + // Brief sleep to avoid busy waiting + std::this_thread::sleep_for(std::chrono::milliseconds(10)); + } + } + + // Print final summary + auto endTime = std::chrono::steady_clock::now(); + auto totalDuration = std::chrono::duration_cast(endTime - startTime).count(); + double avgThroughput = totalDuration > 0 ? (messageCount * 1000.0) / totalDuration : 0.0; + + std::cout << "Total runtime: " << totalDuration << " ms" << std::endl; + std::cout << "Total messages received: " << messageCount << std::endl; + std::cout << "Expected messages: " << expectedMessageCount << std::endl; + std::cout << "Average throughput: " << avgThroughput << " msg/s" << std::endl; + + // Validate final sequence integrity + if (messageCount == expectedMessageCount && !sequenceError) + { + std::cout << "✓ SUCCESS: Perfect message integrity! All " << messageCount << " messages received in correct sequence (0-" << ((expectedMessageCount - 1) % 100) << ")" << std::endl; + } + else if (messageCount != expectedMessageCount) + { + std::cout << "✗ ERROR: Message count mismatch. Expected " << expectedMessageCount << ", received " << messageCount << std::endl; + } + else if (sequenceError) + { + std::cout << "✗ ERROR: Sequence validation failed. Messages received out of order or with incorrect values." << std::endl; + } + + std::cout << std::endl; + } + catch (const std::exception& ex) + { + std::cerr << "Error: " << ex.what() << std::endl; + std::cout << std::endl; + std::cout << "This might happen if:" << std::endl; + std::cout << "1. No message sender has connected to the queue yet" << std::endl; + std::cout << "2. There's a permissions issue with the memory-mapped file" << std::endl; + std::cout << "3. The queue configuration doesn't match between processes" << std::endl; + std::cout << std::endl; + std::cout << "3. The queue configuration doesn't match between processes" << std::endl; + std::cout << std::endl; + std::cout << "Make sure the message producer is running with compatible settings." << std::endl; + return 1; + } + + return 0; +} diff --git a/src/Sample/cpp/Consumer/Consumer.vcxproj b/src/Sample/cpp/Consumer/Consumer.vcxproj new file mode 100644 index 0000000..b211014 --- /dev/null +++ b/src/Sample/cpp/Consumer/Consumer.vcxproj @@ -0,0 +1,143 @@ + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + 17.0 + Win32Proj + {891953fc-a67c-414b-98d5-039e437beb6b} + Consumer + 10.0 + + + + Application + true + v143 + Unicode + stdcpp20 + + + Application + false + v143 + true + Unicode + stdcpp20 + + + Application + true + v143 + Unicode + stdcpp20 + + + Application + false + v143 + true + Unicode + stdcpp20 + + + + + + + + + + + + + + + + + + + + + + Level3 + true + WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + $(ProjectDir)..\..\..\Interprocess.Native.Static;%(AdditionalIncludeDirectories) + stdcpp20 + + + Console + true + + + + + Level3 + true + true + true + WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + $(ProjectDir)..\..\..\Interprocess.Native.Static;%(AdditionalIncludeDirectories) + stdcpp20 + + + Console + true + + + + + Level3 + true + _DEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + $(ProjectDir)..\..\..\Interprocess.Native.Static;%(AdditionalIncludeDirectories) + stdcpp20 + + + Console + true + + + + + Level3 + true + true + true + NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + $(ProjectDir)..\..\..\Interprocess.Native.Static;%(AdditionalIncludeDirectories) + stdcpp20 + + + Console + true + + + + + + + + + \ No newline at end of file diff --git a/src/Sample/cpp/Consumer/Consumer.vcxproj.filters b/src/Sample/cpp/Consumer/Consumer.vcxproj.filters new file mode 100644 index 0000000..a200e4d --- /dev/null +++ b/src/Sample/cpp/Consumer/Consumer.vcxproj.filters @@ -0,0 +1,22 @@ + + + + + {4FC737F1-C7A5-4376-A066-2A32D752A2FF} + cpp;c;cc;cxx;c++;cppm;ixx;def;odl;idl;hpj;bat;asm;asmx + + + {93995380-89BD-4b04-88EB-625FBE52EBFB} + h;hh;hpp;hxx;h++;hm;inl;inc;ipp;xsd + + + {67DA6AB6-F800-4c08-8B7A-83BB121AAD01} + rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms + + + + + Source Files + + + \ No newline at end of file diff --git a/src/Sample/cpp/Producer/Producer.cpp b/src/Sample/cpp/Producer/Producer.cpp new file mode 100644 index 0000000..12e2293 --- /dev/null +++ b/src/Sample/cpp/Producer/Producer.cpp @@ -0,0 +1,117 @@ +#include +#include +#include +#include +#include +#include +#include +#include "QueueOptions.h" +#include "QueueFactory.h" +#include "IPublisher.h" + +using namespace Cloudtoid::Interprocess; + +int main(int argc, char* argv[]) +{ + try + { + // Parse command line arguments for message count and optional queue name + int targetMessageCount = 1000; // Default to 1000 messages + std::string queueName = "sample-queue"; // Default queue name + + if (argc > 1) + { + targetMessageCount = std::atoi(argv[1]); + if (targetMessageCount <= 0) + { + std::cerr << "Error: Message count must be a positive integer" << std::endl; + std::cerr << "Usage: " << argv[0] << " [message_count] [queue_name]" << std::endl; + return 1; + } + } + + if (argc > 2) + { + queueName = argv[2]; + } + + std::cout << "C++ Producer starting..." << std::endl; + std::cout << "Target message count: " << targetMessageCount << std::endl; + + // Mirror the C# publisher configuration + const size_t capacity = 1024 * 1024; // 1MB like C# version + + // Convert string to wstring for QueueOptions + const std::wstring wQueueName(queueName.begin(), queueName.end()); + + // Create queue options - using 2-parameter constructor to match C# behavior exactly + QueueOptions options(wQueueName, capacity); + + // Create publisher using the QueueFactory + QueueFactory factory; + std::unique_ptr publisher(factory.CreatePublisher(options)); + + std::cout << "Created queue: " << queueName << std::endl; + std::cout << "Capacity: " << capacity << " bytes" << std::endl; + std::cout << "Starting to send " << targetMessageCount << " messages..." << std::endl; + std::cout << std::endl; + + int messageCount = 0; + auto startTime = std::chrono::steady_clock::now(); + + // Send exactly the target number of messages + while (messageCount < targetMessageCount) + { + // Create a message with sequential values 0-99, repeating if targetMessageCount > 100 + // This matches the C# publisher pattern exactly + unsigned char messageData = static_cast(messageCount % 100); + std::span message(&messageData, 1); + + // Try to enqueue the message + if (publisher->TryEnqueue(message)) + { + messageCount++; + + // Show progress every 100 messages or at key milestones + if (messageCount % 100 == 0 || messageCount == targetMessageCount) + { + auto currentTime = std::chrono::steady_clock::now(); + auto elapsed = std::chrono::duration_cast(currentTime - startTime).count(); + + double messagesPerSecond = elapsed > 0 ? static_cast(messageCount * 1000) / elapsed : 0.0; + std::cout << "Sent " << messageCount << "/" << targetMessageCount + << " messages (Current byte: " << static_cast(messageData) + << ", Throughput: " << messagesPerSecond << " msg/s)" << std::endl; + } + } + // queue said no, try again. + } + + // Print final summary + auto endTime = std::chrono::steady_clock::now(); + auto totalDuration = std::chrono::duration_cast(endTime - startTime).count(); + double avgThroughput = totalDuration > 0 ? static_cast(messageCount * 1000) / totalDuration : 0.0; + + std::cout << std::endl; + std::cout << "=== C++ Producer Finished ===" << std::endl; + std::cout << "Total runtime: " << totalDuration << " ms" << std::endl; + std::cout << "Total messages sent: " << messageCount << std::endl; + std::cout << "Target messages: " << targetMessageCount << std::endl; + std::cout << "Average throughput: " << avgThroughput << " msg/s" << std::endl; + std::cout << std::endl; + } + catch (const std::exception& ex) + { + std::cerr << "Error: " << ex.what() << std::endl; + std::cout << std::endl; + std::cout << "This might happen if:" << std::endl; + std::cout << "1. Unable to create the memory-mapped file" << std::endl; + std::cout << "2. Permissions issue with the queue creation" << std::endl; + std::cout << "3. Another process has the queue locked" << std::endl; + std::cout << std::endl; + std::cout << "Check that no other process is using the same queue name with conflicting settings." << std::endl; + return 1; + } + + return 0; +} diff --git a/src/Sample/cpp/Producer/Producer.vcxproj b/src/Sample/cpp/Producer/Producer.vcxproj new file mode 100644 index 0000000..59fef0a --- /dev/null +++ b/src/Sample/cpp/Producer/Producer.vcxproj @@ -0,0 +1,143 @@ + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + 17.0 + Win32Proj + {d32f5cfd-ac27-4956-a9db-b36919c9d12b} + Producer + 10.0 + + + + Application + true + v143 + Unicode + stdcpp20 + + + Application + false + v143 + true + Unicode + stdcpp20 + + + Application + true + v143 + Unicode + stdcpp20 + + + Application + false + v143 + true + Unicode + stdcpp20 + + + + + + + + + + + + + + + + + + + + + + Level3 + true + WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + $(ProjectDir)..\..\..\Interprocess.Native.Static;%(AdditionalIncludeDirectories) + stdcpp20 + + + Console + true + + + + + Level3 + true + true + true + WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + $(ProjectDir)..\..\..\Interprocess.Native.Static;%(AdditionalIncludeDirectories) + stdcpp20 + + + Console + true + + + + + Level3 + true + _DEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + $(ProjectDir)..\..\..\Interprocess.Native.Static;%(AdditionalIncludeDirectories) + stdcpp20 + + + Console + true + + + + + Level3 + true + true + true + NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + $(ProjectDir)..\..\..\Interprocess.Native.Static;%(AdditionalIncludeDirectories) + stdcpp20 + + + Console + true + + + + + + + + + \ No newline at end of file diff --git a/src/Sample/cpp/Producer/Producer.vcxproj.filters b/src/Sample/cpp/Producer/Producer.vcxproj.filters new file mode 100644 index 0000000..b1639a9 --- /dev/null +++ b/src/Sample/cpp/Producer/Producer.vcxproj.filters @@ -0,0 +1,22 @@ + + + + + {4FC737F1-C7A5-4376-A066-2A32D752A2FF} + cpp;c;cc;cxx;c++;cppm;ixx;def;odl;idl;hpj;bat;asm;asmx + + + {93995380-89BD-4b04-88EB-625FBE52EBFB} + h;hh;hpp;hxx;h++;hm;inl;inc;ipp;xsd + + + {67DA6AB6-F800-4c08-8B7A-83BB121AAD01} + rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms + + + + + Source Files + + + \ No newline at end of file diff --git a/src/Sample/cpp/RangeProducer/README.md b/src/Sample/cpp/RangeProducer/README.md new file mode 100644 index 0000000..42eabdb --- /dev/null +++ b/src/Sample/cpp/RangeProducer/README.md @@ -0,0 +1 @@ +NOTE: Waits for an event to be set before pushing values into the queue \ No newline at end of file diff --git a/src/Sample/cpp/RangeProducer/RangeProducer.cpp b/src/Sample/cpp/RangeProducer/RangeProducer.cpp new file mode 100644 index 0000000..4221896 --- /dev/null +++ b/src/Sample/cpp/RangeProducer/RangeProducer.cpp @@ -0,0 +1,160 @@ +#include +#include +#include +#include +#include +#include +#include + +#define NOMINMAX // Prevent windows.h from defining min/max macros +#include + +#include "QueueOptions.h" +#include "QueueFactory.h" +#include "IPublisher.h" + +using namespace Cloudtoid::Interprocess; + +// Custom producer that sends a specific range of values +// Usage: RangeProducer +// Example: RangeProducer 0 5 test-queue (sends values 0, 1, 2, 3, 4) + +int main(int argc, char* argv[]) +{ + try + { + if (argc < 4) + { + std::cerr << "Usage: " << argv[0] << " [event_name]" << std::endl; + std::cerr << "Example: " << argv[0] << " 0 5 test-queue StartEvent" << std::endl; + return 1; + } + + int startValue = std::atoi(argv[1]); + int count = std::atoi(argv[2]); + std::string queueName = argv[3]; + std::string eventName = (argc > 4) ? argv[4] : ""; + + if (count <= 0) + { + std::cerr << "Error: Count must be a positive integer" << std::endl; + return 1; + } + + std::cout << "Range Producer starting..." << std::endl; + std::cout << "Start value: " << startValue << std::endl; + std::cout << "Count: " << count << std::endl; + std::cout << "Queue: " << queueName << std::endl; + + // If event name is specified, wait for it to be signaled before proceeding + if (!eventName.empty()) + { + std::cout << "Waiting for start event: " << eventName << std::endl; + + // Open or create the named event + std::wstring wEventName(eventName.begin(), eventName.end()); + HANDLE hEvent = OpenEventW(SYNCHRONIZE, FALSE, wEventName.c_str()); + + if (hEvent == NULL) + { + // Event doesn't exist yet, create it (unsignaled) + hEvent = CreateEventW(NULL, TRUE, FALSE, wEventName.c_str()); + if (hEvent == NULL) + { + std::cerr << "Failed to create/open event: " << GetLastError() << std::endl; + return 1; + } + } + + std::cout << "Blocking on event..." << std::endl; + + // Wait for the event to be signaled (max 30 seconds) + DWORD waitResult = WaitForSingleObject(hEvent, 30000); + + if (waitResult == WAIT_OBJECT_0) + { + std::cout << "Event signaled! Starting production..." << std::endl; + } + else if (waitResult == WAIT_TIMEOUT) + { + std::cerr << "Timeout waiting for start event" << std::endl; + CloseHandle(hEvent); + return 1; + } + else + { + std::cerr << "Error waiting for event: " << GetLastError() << std::endl; + CloseHandle(hEvent); + return 1; + } + + CloseHandle(hEvent); + } + + const size_t capacity = 10 * 1024 * 1024; // 10MB for high concurrency + const std::wstring wQueueName(queueName.begin(), queueName.end()); + + QueueOptions options(wQueueName, capacity); + QueueFactory factory; + std::unique_ptr publisher(factory.CreatePublisher(options)); + + std::cout << "Connected to queue, sending values " << startValue + << " to " << (startValue + count - 1) << "..." << std::endl; + + int sent = 0; + auto startTime = std::chrono::steady_clock::now(); + + for (int i = 0; i < count; ++i) + { + int value = startValue + i; + std::span message(reinterpret_cast(&value), sizeof(int)); + + // Retry with timeout to detect actual failures + int retries = 0; + const int maxRetries = 5000; // 5 seconds at 1ms per retry + while (!publisher->TryEnqueue(message)) + { + if (++retries > maxRetries) + { + std::cerr << "FATAL: Failed to enqueue message after " << maxRetries << " retries" << std::endl; + std::cerr << "Sent " << sent << " out of " << count << " messages before failure" << std::endl; + return 2; // Different exit code for enqueue failure + } + // Queue full, run hard till we have it + } + + sent++; + std::cout << "Sent value: " << value << " (" << sent << "/" << count << ")" << std::endl; + } + + auto endTime = std::chrono::steady_clock::now(); + auto duration = std::chrono::duration_cast(endTime - startTime).count(); + + // Verify we sent exactly what we expected + if (sent != count) + { + std::cerr << "FATAL: Message count mismatch! Expected " << count << " but sent " << sent << std::endl; + return 3; // Exit code for count mismatch + } + + std::cout << std::endl; + std::cout << "SUCCESS: Range Producer completed!" << std::endl; + std::cout << "Sent " << sent << " messages in " << duration << " ms" << std::endl; + std::cout << "Values sent: " << startValue << " to " << (startValue + count - 1) << std::endl; + + // Calculate and display sum for verification + int sum = 0; + for (int i = 0; i < count; ++i) + { + sum += (startValue + i); + } + std::cout << "Sum of sent values: " << sum << std::endl; + } + catch (const std::exception& ex) + { + std::cerr << "Error: " << ex.what() << std::endl; + return 1; + } + + return 0; +} diff --git a/src/Sample/cpp/RangeProducer/RangeProducer.vcxproj b/src/Sample/cpp/RangeProducer/RangeProducer.vcxproj new file mode 100644 index 0000000..a14dce0 --- /dev/null +++ b/src/Sample/cpp/RangeProducer/RangeProducer.vcxproj @@ -0,0 +1,143 @@ + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + 17.0 + Win32Proj + {8A9F5CD2-BC38-4A5D-9E1F-7C8B4D6E3A2F} + RangeProducer + 10.0 + + + + Application + true + v143 + Unicode + stdcpp20 + + + Application + false + v143 + true + Unicode + stdcpp20 + + + Application + true + v143 + Unicode + stdcpp20 + + + Application + false + v143 + true + Unicode + stdcpp20 + + + + + + + + + + + + + + + + + + + + + + Level3 + true + WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + $(ProjectDir)..\..\..\Interprocess.Native.Static;%(AdditionalIncludeDirectories) + stdcpp20 + + + Console + true + + + + + Level3 + true + true + true + WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + $(ProjectDir)..\..\..\Interprocess.Native.Static;%(AdditionalIncludeDirectories) + stdcpp20 + + + Console + true + + + + + Level3 + true + _DEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + $(ProjectDir)..\..\..\Interprocess.Native.Static;%(AdditionalIncludeDirectories) + stdcpp20 + + + Console + true + + + + + Level3 + true + true + true + NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + $(ProjectDir)..\..\..\Interprocess.Native.Static;%(AdditionalIncludeDirectories) + stdcpp20 + + + Console + true + + + + + + + + + diff --git a/src/Sample/cpp/SumConsumer/SumConsumer.cpp b/src/Sample/cpp/SumConsumer/SumConsumer.cpp new file mode 100644 index 0000000..910512e --- /dev/null +++ b/src/Sample/cpp/SumConsumer/SumConsumer.cpp @@ -0,0 +1,208 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +#define NOMINMAX // Prevent windows.h from defining min/max macros +#include + +#include "QueueOptions.h" +#include "QueueFactory.h" +#include "ISubscriber.h" + +using namespace Cloudtoid::Interprocess; + +// Custom consumer that validates sum of received values +// Usage: SumConsumer [timeout_seconds] +// Example: SumConsumer 15 105 test-queue 30 + +int main(int argc, char* argv[]) +{ + try + { + if (argc < 4) + { + std::cerr << "Usage: " << argv[0] << " [timeout_seconds] [event_name]" << std::endl; + std::cerr << "Example: " << argv[0] << " 15 105 test-queue 30 StartEvent" << std::endl; + return 1; + } + + int expectedCount = std::atoi(argv[1]); + int expectedSum = std::atoi(argv[2]); + std::string queueName = argv[3]; + int timeoutSeconds = (argc > 4) ? std::atoi(argv[4]) : 30; + std::string eventName = (argc > 5) ? argv[5] : ""; + + if (expectedCount <= 0) + { + std::cerr << "Error: Expected count must be a positive integer" << std::endl; + return 1; + } + + std::cout << "Sum Consumer starting..." << std::endl; + std::cout << "Expected message count: " << expectedCount << std::endl; + std::cout << "Expected sum: " << expectedSum << std::endl; + std::cout << "Queue: " << queueName << std::endl; + std::cout << "Timeout: " << timeoutSeconds << " seconds" << std::endl; + std::cout << std::endl; + + const size_t capacity = 10 * 1024 * 1024; // 10MB for high concurrency + const std::wstring wQueueName(queueName.begin(), queueName.end()); + + QueueOptions options(wQueueName, capacity); + QueueFactory factory; + std::unique_ptr subscriber(factory.CreateSubscriber(options)); + + std::cout << "Connected to queue" << std::endl; + + // If event name specified, signal it to start all producers + HANDLE hEvent = NULL; + if (!eventName.empty()) + { + std::cout << "Signaling start event: " << eventName << std::endl; + + std::wstring wEventName(eventName.begin(), eventName.end()); + + // Create the event in signaled state (manual-reset so all waiters are released) + hEvent = CreateEventW(NULL, TRUE, FALSE, wEventName.c_str()); + if (hEvent == NULL) + { + std::cerr << "Failed to create event: " << GetLastError() << std::endl; + return 1; + } + + // Signal the event to release all waiting producers + if (!SetEvent(hEvent)) + { + std::cerr << "Failed to signal event: " << GetLastError() << std::endl; + CloseHandle(hEvent); + return 1; + } + + std::cout << "Event signaled - all producers starting now!" << std::endl; + } + + std::cout << "Waiting for messages..." << std::endl; + std::cout << std::endl; + + std::vector buffer(sizeof(int)); + std::span bufferSpan(buffer); + std::span message; + + int messageCount = 0; + int actualSum = 0; + std::vector receivedValues; + std::set uniqueValues; + + auto startTime = std::chrono::steady_clock::now(); + auto lastMessageTime = startTime; + auto timeoutDuration = std::chrono::seconds(timeoutSeconds); + + while (messageCount < expectedCount) + { + if (subscriber->TryDequeue(bufferSpan, message)) + { + if (message.size() >= sizeof(int)) + { + int intValue = *reinterpret_cast(message.data()); + + receivedValues.push_back(intValue); + uniqueValues.insert(intValue); + actualSum += intValue; + messageCount++; + lastMessageTime = std::chrono::steady_clock::now(); + + std::cout << "Received value: " << intValue + << " (message " << messageCount << "/" << expectedCount + << ", running sum: " << actualSum << ")" << std::endl; + } + } + else + { + // Check for timeout + auto now = std::chrono::steady_clock::now(); + auto timeSinceLastMessage = std::chrono::duration_cast(now - lastMessageTime); + + if (timeSinceLastMessage > timeoutDuration) + { + std::cout << std::endl; + std::cout << "⚠ TIMEOUT: No messages received for " << timeoutSeconds << " seconds" << std::endl; + break; + } + + // Small sleep to avoid busy-waiting + std::this_thread::sleep_for(std::chrono::milliseconds(10)); + } + } + + auto endTime = std::chrono::steady_clock::now(); + auto duration = std::chrono::duration_cast(endTime - startTime).count(); + + std::cout << std::endl; + std::cout << "=== Sum Consumer Results ===" << std::endl; + std::cout << "Total runtime: " << duration << " ms" << std::endl; + std::cout << "Messages received: " << messageCount << " / " << expectedCount << std::endl; + std::cout << "Actual sum: " << actualSum << std::endl; + std::cout << "Expected sum: " << expectedSum << std::endl; + std::cout << "Unique values received: " << uniqueValues.size() << std::endl; + + // Display all received values + std::cout << "Received values: ["; + for (size_t i = 0; i < receivedValues.size(); ++i) + { + std::cout << receivedValues[i]; + if (i < receivedValues.size() - 1) std::cout << ", "; + } + std::cout << "]" << std::endl; + std::cout << std::endl; + + // Validation + bool success = true; + std::string errorMsg; + + if (messageCount != expectedCount) + { + success = false; + errorMsg = "Message count mismatch"; + std::cout << "❌ FAILED: Expected " << expectedCount << " messages but received " << messageCount << std::endl; + } + else if (actualSum != expectedSum) + { + success = false; + errorMsg = "Sum mismatch"; + std::cout << "❌ FAILED: Expected sum " << expectedSum << " but got " << actualSum << std::endl; + std::cout << " Difference: " << (actualSum - expectedSum) << std::endl; + } + else + { + std::cout << "✅ SUCCESS: All messages received with correct sum!" << std::endl; + std::cout << " Perfect integrity - no data corruption or loss detected." << std::endl; + } + + std::cout << std::endl; + + // Check for duplicates (not necessarily an error, but interesting to know) + if (uniqueValues.size() != static_cast(messageCount)) + { + std::cout << "ℹ Note: " << (messageCount - uniqueValues.size()) << " duplicate value(s) detected" << std::endl; + std::cout << " This is expected when value ranges overlap." << std::endl; + } + + // Cleanup event handle + if (hEvent != NULL) + { + CloseHandle(hEvent); + } + + return success ? 0 : 1; + } + catch (const std::exception& ex) + { + std::cerr << "Error: " << ex.what() << std::endl; + return 1; + } +} diff --git a/src/Sample/cpp/SumConsumer/SumConsumer.vcxproj b/src/Sample/cpp/SumConsumer/SumConsumer.vcxproj new file mode 100644 index 0000000..e9e3b5c --- /dev/null +++ b/src/Sample/cpp/SumConsumer/SumConsumer.vcxproj @@ -0,0 +1,143 @@ + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + 17.0 + Win32Proj + {7B8D3E4C-AF29-4C6F-8D2A-1E5F9B3C7D8A} + SumConsumer + 10.0 + + + + Application + true + v143 + Unicode + stdcpp20 + + + Application + false + v143 + true + Unicode + stdcpp20 + + + Application + true + v143 + Unicode + stdcpp20 + + + Application + false + v143 + true + Unicode + stdcpp20 + + + + + + + + + + + + + + + + + + + + + + Level3 + true + WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + $(ProjectDir)..\..\..\Interprocess.Native.Static;%(AdditionalIncludeDirectories) + stdcpp20 + + + Console + true + + + + + Level3 + true + true + true + WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + $(ProjectDir)..\..\..\Interprocess.Native.Static;%(AdditionalIncludeDirectories) + stdcpp20 + + + Console + true + + + + + Level3 + true + _DEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + $(ProjectDir)..\..\..\Interprocess.Native.Static;%(AdditionalIncludeDirectories) + stdcpp20 + + + Console + true + + + + + Level3 + true + true + true + NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + true + $(ProjectDir)..\..\..\Interprocess.Native.Static;%(AdditionalIncludeDirectories) + stdcpp20 + + + Console + true + + + + + + + + + diff --git a/src/Sample/csharp/Publisher/Program.cs b/src/Sample/csharp/Publisher/Program.cs new file mode 100644 index 0000000..a1204b3 --- /dev/null +++ b/src/Sample/csharp/Publisher/Program.cs @@ -0,0 +1,94 @@ +using Cloudtoid.Interprocess; + +namespace Publisher; + +internal static partial class Program +{ + internal static async Task Main(string[] args) + { + // Parse command line arguments for message count and optional queue name + int targetMessageCount = 100; // Default to 100 messages + string queueName = "sample-queue"; // Default queue name + + if (args.Length > 0) + { + if (!int.TryParse(args[0], out targetMessageCount) || targetMessageCount <= 0) + { + Console.WriteLine("Error: Message count must be a positive integer"); + Console.WriteLine( + $"Usage: {System.Diagnostics.Process.GetCurrentProcess().ProcessName} [message_count] [queue_name]"); + return; + } + } + + if (args.Length > 1) + queueName = args[1]; + + // Set up an optional logger factory to redirect the traces to he console + + using var loggerFactory = LoggerFactory.Create(builder => builder.AddConsole()); + var logger = loggerFactory.CreateLogger("Publisher"); + + // Create the queue factory. If you are not interested in tracing the internals of + // the queue then don't pass in a loggerFactory + + var factory = new QueueFactory(loggerFactory); + + // Create a message queue publisher + + var options = new QueueOptions( + queueName: queueName, + capacity: 1024 * 1024); + + using var publisher = factory.CreatePublisher(options); + + LogStart(logger, targetMessageCount); + + // Enqueue messages + int messageCount = 0; + var startTime = DateTime.UtcNow; + + while (messageCount < targetMessageCount) + { + // Send sequential values 0-99, repeating if targetMessageCount > 100 + byte value = (byte)(messageCount % 100); + + if (publisher.TryEnqueue([value])) + { + messageCount++; + + // Show progress every 10 messages or at completion + if (messageCount % 10 == 0 || messageCount == targetMessageCount) + LogEnqueue(logger, value, messageCount, targetMessageCount); + } + else + { + await Task.Delay(1); + } + } + + var endTime = DateTime.UtcNow; + var duration = (endTime - startTime).TotalMilliseconds; + var throughput = duration > 0 ? messageCount * 1000.0 / duration : 0.0; + + LogFinished(logger); + LogTotalSent(logger, messageCount); + LogThroughput(logger, throughput); + } + + [LoggerMessage(Level = LogLevel.Information, Message = "C# Publisher starting to send {TargetCount} messages...")] + private static partial void LogStart(ILogger logger, int targetCount); + + [LoggerMessage(Level = LogLevel.Information, + Message = "Sent {MessageCount}/{TargetCount} messages (Current value: {Value})")] + private static partial void LogEnqueue(ILogger logger, int value, int messageCount, int targetCount); + + [LoggerMessage(Level = LogLevel.Information, Message = "=== C# Publisher Finished ===")] + private static partial void LogFinished(ILogger logger); + + [LoggerMessage(Level = LogLevel.Information, Message = "Total messages sent: {MessageCount}")] + private static partial void LogTotalSent(ILogger logger, int messageCount); + + [LoggerMessage(Level = LogLevel.Information, Message = "Average throughput: {Throughput:F1} msg/s")] + private static partial void LogThroughput(ILogger logger, double throughput); +} \ No newline at end of file diff --git a/src/Sample/Publisher/Properties/launchSettings.json b/src/Sample/csharp/Publisher/Properties/launchSettings.json similarity index 100% rename from src/Sample/Publisher/Properties/launchSettings.json rename to src/Sample/csharp/Publisher/Properties/launchSettings.json diff --git a/src/Sample/Publisher/Publisher.csproj b/src/Sample/csharp/Publisher/Publisher.csproj similarity index 76% rename from src/Sample/Publisher/Publisher.csproj rename to src/Sample/csharp/Publisher/Publisher.csproj index db337d9..b6f6887 100644 --- a/src/Sample/Publisher/Publisher.csproj +++ b/src/Sample/csharp/Publisher/Publisher.csproj @@ -9,7 +9,7 @@ - + diff --git a/src/Sample/csharp/Subscriber/Program.cs b/src/Sample/csharp/Subscriber/Program.cs new file mode 100644 index 0000000..bd0b513 --- /dev/null +++ b/src/Sample/csharp/Subscriber/Program.cs @@ -0,0 +1,116 @@ +using Cloudtoid.Interprocess; + +namespace Subscriber; + +internal static partial class Program +{ + internal static void Main(string[] args) + { + // Parse command line arguments for optional queue name + string queueName = "sample-queue"; // Default queue name + + if (args.Length > 0) + queueName = args[0]; + // Set up an optional logger factory to redirect the traces to he console + + using var loggerFactory = LoggerFactory.Create(builder => builder.AddConsole()); + var logger = loggerFactory.CreateLogger("Subscriber"); + + // Create the queue factory. If you are not interested in tracing the internals of + // the queue then don't pass in a loggerFactory + + var factory = new QueueFactory(loggerFactory); + + // Create a message queue subscriber + + var options = new QueueOptions( + queueName: queueName, + capacity: 1024 * 1024); + + using var subscriber = factory.CreateSubscriber(options); + + // Dequeue messages + var messageBuffer = new byte[1]; + var messageCount = 0; + var lastMessageTime = DateTime.UtcNow; + var sequenceError = false; + + LogStart(logger); + + while (true) + { + if (subscriber.TryDequeue(messageBuffer, default, out var message)) + { + byte receivedByte = messageBuffer[0]; + + // Validate sequential value (0-99, repeating) + int expectedByte = messageCount % 100; + if (receivedByte != expectedByte) + { + // Only log first sequence error + if (!sequenceError) + { + LogSequenceError(logger, messageCount + 1, expectedByte, receivedByte); + sequenceError = true; + } + } + + messageCount++; + lastMessageTime = DateTime.UtcNow; + LogDequeue(logger, receivedByte, messageCount); + } + else + { + // If no messages for 3 seconds, assume producer is done + if (messageCount > 0 && (DateTime.UtcNow - lastMessageTime).TotalSeconds > 3) + { + LogFinished(logger); + LogTotalReceived(logger, messageCount); + + // Validate final sequence integrity + if (!sequenceError) + { + int maxValue = Math.Min((messageCount - 1) % 100, 99); + LogSequenceSuccess(logger, messageCount, maxValue); + } + else + { + LogSequenceFailure(logger); + } + + break; + } + + // Short sleep to avoid busy waiting + Thread.Sleep(10); + } + } + } + + [LoggerMessage(Level = LogLevel.Information, Message = "C# Subscriber started, waiting for messages...")] + private static partial void LogStart(ILogger logger); + + [LoggerMessage(Level = LogLevel.Information, Message = "=== C# Subscriber Finished ===")] + private static partial void LogFinished(ILogger logger); + + [LoggerMessage(Level = LogLevel.Information, Message = "Total messages received: {MessageCount}")] + private static partial void LogTotalReceived(ILogger logger, int messageCount); + + [LoggerMessage(Level = LogLevel.Information, Message = "Dequeue #{MessageCount}: {Value}")] + private static partial void LogDequeue(ILogger logger, int value, int messageCount); + + [LoggerMessage( + Level = LogLevel.Warning, + Message = "SEQUENCE ERROR at message {MessageNum}: expected {Expected}, got {Received}")] + private static partial void LogSequenceError(ILogger logger, int messageNum, int expected, int received); + + [LoggerMessage( + Level = LogLevel.Information, + Message = "Perfect message integrity! All {MessageCount} messages received in correct sequence (0-{MaxValue})")] + private static partial void LogSequenceSuccess(ILogger logger, int messageCount, int maxValue); + + [LoggerMessage( + Level = LogLevel.Error, + Message = "Sequence validation failed. Messages received out of order or with incorrect values.")] + private static partial void LogSequenceFailure(ILogger logger); +} \ No newline at end of file diff --git a/src/Sample/Subscriber/Properties/launchSettings.json b/src/Sample/csharp/Subscriber/Properties/launchSettings.json similarity index 100% rename from src/Sample/Subscriber/Properties/launchSettings.json rename to src/Sample/csharp/Subscriber/Properties/launchSettings.json diff --git a/src/Sample/Subscriber/Subscriber.csproj b/src/Sample/csharp/Subscriber/Subscriber.csproj similarity index 76% rename from src/Sample/Subscriber/Subscriber.csproj rename to src/Sample/csharp/Subscriber/Subscriber.csproj index db337d9..b6f6887 100644 --- a/src/Sample/Subscriber/Subscriber.csproj +++ b/src/Sample/csharp/Subscriber/Subscriber.csproj @@ -9,7 +9,7 @@ - + diff --git a/src/Sample/csharp/SumConsumer/Program.cs b/src/Sample/csharp/SumConsumer/Program.cs new file mode 100644 index 0000000..87a5aaf --- /dev/null +++ b/src/Sample/csharp/SumConsumer/Program.cs @@ -0,0 +1,185 @@ +using System.Diagnostics; +using System.Globalization; +using Cloudtoid.Interprocess; + +// C# Sum Consumer - validates that all messages from concurrent producers are received +// Usage: SumConsumer [timeout_seconds] [event_name] +// Example: SumConsumer 500 124750 test-queue 30 ProducerStartEvent_123456 + +namespace SumConsumer; + +internal static partial class Program +{ + internal static int Main(string[] args) + { + EventWaitHandle? eventHandle = null; + try + { + if (args.Length < 3) + { + Console.Error.WriteLine("Usage: SumConsumer " + + "[timeout_seconds] [event_name]"); + Console.Error.WriteLine("Example: SumConsumer 500 124750 test-queue 30 ProducerStartEvent_123456"); + return 1; + } + + int expectedCount = int.Parse(args[0], CultureInfo.InvariantCulture); + long expectedSum = long.Parse(args[1], CultureInfo.InvariantCulture); + string queueName = args[2]; + int timeoutSeconds = args.Length > 3 + ? int.Parse(args[3], CultureInfo.InvariantCulture) + : 30; + string? eventName = args.Length > 4 ? args[4] : null; + + if (expectedCount <= 0) + { + Console.Error.WriteLine("Error: Expected count must be a positive integer"); + return 1; + } + + Console.WriteLine("Sum Consumer starting..."); + Console.WriteLine($"Expected message count: {expectedCount}"); + Console.WriteLine($"Expected sum: {expectedSum}"); + Console.WriteLine($"Queue: {queueName}"); + Console.WriteLine($"Timeout: {timeoutSeconds} seconds"); + Console.WriteLine(); + + const int capacity = 10 * 1024 * 1024; // 10MB for high concurrency + + var options = new QueueOptions( + queueName: queueName, + capacity: capacity); + + var factory = new QueueFactory(); + using var subscriber = factory.CreateSubscriber(options); + + Console.WriteLine("Connected to queue"); + + // If event name provided, create and signal the event + if (!string.IsNullOrEmpty(eventName)) + { + Console.WriteLine($"Signaling start event: {eventName}"); + try + { + eventHandle = new EventWaitHandle( + initialState: false, + mode: EventResetMode.ManualReset, + name: eventName); + eventHandle.Set(); + Console.WriteLine("Event signaled - all producers starting now!"); + } + catch (Exception ex) + { + Console.Error.WriteLine($"Failed to create/signal event: {ex.Message}"); + return 1; + } + } + + Console.WriteLine("Waiting for messages..."); + Console.WriteLine(); + + var receivedValues = new List(); + var uniqueValues = new HashSet(); + int messageCount = 0; + long actualSum = 0; + + var startTime = Stopwatch.StartNew(); + var lastMessageTime = Stopwatch.StartNew(); + var timeoutDuration = TimeSpan.FromSeconds(timeoutSeconds); + + var buffer = new byte[sizeof(int)]; + while (messageCount < expectedCount) + { + if (subscriber.TryDequeue(buffer, default, out var message)) + { + if (message.Length >= sizeof(int)) + { + int intValue = BitConverter.ToInt32(message.Span); + + receivedValues.Add(intValue); + uniqueValues.Add(intValue); + actualSum += intValue; + messageCount++; + lastMessageTime.Restart(); + + string receivedMsg = $"Received value: {intValue} " + + $"(message {messageCount}/{expectedCount}, running sum: {actualSum})"; + Console.WriteLine(receivedMsg); + } + } + else + { + // Check for timeout + if (lastMessageTime.Elapsed > timeoutDuration) + { + string timeoutMsg = "Timeout waiting for messages. " + + $"Received {messageCount}/{expectedCount}"; + Console.Error.WriteLine(timeoutMsg); + break; + } + + // Small sleep to avoid busy waiting + Thread.Sleep(1); + } + } + + startTime.Stop(); + + // Print results + Console.WriteLine(); + Console.WriteLine("=== Sum Consumer Results ==="); + Console.WriteLine($"Total runtime: {startTime.ElapsedMilliseconds} ms"); + Console.WriteLine($"Messages received: {messageCount} / {expectedCount}"); + Console.WriteLine($"Actual sum: {actualSum}"); + Console.WriteLine($"Expected sum: {expectedSum}"); + Console.WriteLine($"Unique values received: {uniqueValues.Count}"); + + // Show received values (abbreviated if too many) + if (receivedValues.Count <= 100) + { + Console.WriteLine($"Received values: [{string.Join(", ", receivedValues)}]"); + } + else + { + Console.WriteLine($"Received values (first 50): [{string.Join(", ", receivedValues.Take(50))}]"); + var lastValues = string.Join(", ", receivedValues.Skip(receivedValues.Count - 50)); + Console.WriteLine($"Received values (last 50): [{lastValues}]"); + } + + // Validation + bool success = messageCount == expectedCount && actualSum == expectedSum; + + if (success) + { + Console.WriteLine("✓ SUCCESS: All messages received with correct sum!"); + Console.WriteLine(" Perfect integrity - no data corruption or loss detected."); + Console.WriteLine(); + return 0; + } + + Console.WriteLine($"✗ FAILED: Expected sum {expectedSum} but got {actualSum}"); + Console.WriteLine($" Difference: {actualSum - expectedSum}"); + Console.WriteLine(); + + if (uniqueValues.Count != receivedValues.Count) + { + int duplicateCount = receivedValues.Count - uniqueValues.Count; + Console.WriteLine($"⚠ Note: {duplicateCount} duplicate value(s) detected"); + Console.WriteLine(" This is expected when value ranges overlap."); + Console.WriteLine(); + } + + return 1; + } + catch (Exception ex) + { + Console.Error.WriteLine($"Error: {ex.Message}"); + Console.Error.WriteLine(ex.StackTrace); + return 1; + } + finally + { + eventHandle?.Dispose(); + } + } +} \ No newline at end of file diff --git a/src/Sample/csharp/SumConsumer/SumConsumer.csproj b/src/Sample/csharp/SumConsumer/SumConsumer.csproj new file mode 100644 index 0000000..e0abdf5 --- /dev/null +++ b/src/Sample/csharp/SumConsumer/SumConsumer.csproj @@ -0,0 +1,16 @@ + + + + Exe + true + + + + + + + + + + + diff --git a/test-comprehensive.ps1 b/test-comprehensive.ps1 new file mode 100644 index 0000000..42debc1 --- /dev/null +++ b/test-comprehensive.ps1 @@ -0,0 +1,732 @@ +################################################################################ +# Comprehensive Cross-Process Message Integrity Test Suite +################################################################################ +# +# Purpose: End-to-end integration testing of the Interprocess library across +# C++ and C# implementations, validating data integrity, cross-language +# interop, and concurrent producer scenarios. +# +# Test Scenarios (6 total): +# 1. C++ Producer → C++ Consumer +# - Validates C++ native implementation +# - 100 sequential messages (values 0-99) +# +# 2. C++ Producer → C# Subscriber +# - Tests C++ to C# interop +# - Ensures CloudtoidInterprocess C# library can read C++ messages +# +# 3. C# Publisher → C++ Consumer +# - Tests C# to C++ interop +# - Validates C++ can consume C#-produced messages +# +# 4. C# Publisher → C# Subscriber +# - Validates C# native implementation +# - Tests CloudtoidInterprocess library end-to-end +# +# 5. Multiple Concurrent C++ Producers → Single C++ Consumer +# - Stress test: 100 concurrent producers, 5 messages each (500 total) +# - Uses Windows Named Events for true concurrent start +# - Validates no data corruption under high concurrency +# - Sum validation: ensures all messages received (sum = 124750) +# +# 6. Multiple Concurrent C++ Producers → Single C# Consumer +# - Cross-language concurrency test +# - Validates C# library can handle concurrent C++ producers +# - Same concurrency pattern as scenario 5 +# - C# uses EventWaitHandle (managed .NET API, not P/Invoke) +# +# Key Features: +# - Sequential Validation: Scenarios 1-4 verify exact message order (0-99) +# - Sum Validation: Scenarios 5-6 use arithmetic sequence sum for integrity +# - Concurrent Synchronization: Named Events ensure all producers start simultaneously +# - Process Isolation: Each test uses unique queue names with timestamps +# - Comprehensive Coverage: Tests all 4 language combinations + concurrency +# +# Usage: +# .\test-comprehensive.ps1 # Run all 6 tests +# .\test-comprehensive.ps1 -Scenario cpp-cpp # Run specific test +# .\test-comprehensive.ps1 -Scenario concurrent # Run C++ concurrent test +# .\test-comprehensive.ps1 -Scenario concurrent-csharp # Run C++ → C# concurrent test +# .\test-comprehensive.ps1 -MessageCount 50 # Use 50 messages for interop tests +# .\test-comprehensive.ps1 -ConcurrentProducerCount 10 # Use 10 concurrent producers +# +# Scenarios: all, cpp-cpp, cpp-csharp, csharp-cpp, csharp-csharp, concurrent, concurrent-csharp +# +# Prerequisites: +# - All C++ projects built (Producer.exe, Consumer.exe, RangeProducer.exe, SumConsumer.exe) +# - All C# projects built (Publisher, Subscriber, SumConsumer) +# - msbuild in PATH +# - .NET 9.0 SDK installed +# +# Exit Codes: +# 0 - All tests passed +# 1 - One or more tests failed or prerequisites missing +# +################################################################################ + +param( + [int]$MessageCount = 100, + [string]$Scenario = "all", + [int]$ConcurrentProducerCount = 50, + [int]$MessagesPerProducer = 500 +) + +$ErrorActionPreference = "Stop" + +Write-Host "=== Cross-Process Message Integrity Test Suite ===" -ForegroundColor Cyan +if ($Scenario -ne "concurrent") { + Write-Host "Testing with $MessageCount messages (values 0-99 sequentially)" -ForegroundColor Cyan +} +if ($Scenario -eq "all" -or $Scenario -eq "concurrent") { + Write-Host "Concurrent test: $ConcurrentProducerCount producers × $MessagesPerProducer messages" -ForegroundColor Cyan +} +Write-Host "" + +# Build project first +Write-Host "Building the project..." -ForegroundColor Yellow +Push-Location "src" +try { + $buildResult = msbuild /t:rebuild /p:Platform=x64 2>&1 + if ($LASTEXITCODE -ne 0) { + Write-Host "Build failed!" -ForegroundColor Red + Write-Host $buildResult -ForegroundColor Red + throw "Build failed" + } + Write-Host "Build successful!" -ForegroundColor Green +} finally { + Pop-Location +} + +# Define paths +$cppProducer = "src\x64\Debug\Producer.exe" +$cppConsumer = "src\x64\Debug\Consumer.exe" +$cppRangeProducer = "src\x64\Debug\RangeProducer.exe" +$cppSumConsumer = "src\x64\Debug\SumConsumer.exe" +$csharpPublisher = "src\Sample\csharp\Publisher" +$csharpSubscriber = "src\Sample\csharp\Subscriber" +$csharpSumConsumer = "src\Sample\csharp\SumConsumer" + +# Verify paths exist +$paths = @{ + "C++ Producer" = $cppProducer + "C++ Consumer" = $cppConsumer + "C++ Range Producer" = $cppRangeProducer + "C++ Sum Consumer" = $cppSumConsumer + "C# Publisher" = "$csharpPublisher\bin\Debug\net9.0\Publisher.exe" + "C# Subscriber" = "$csharpSubscriber\bin\Debug\net9.0\Subscriber.exe" + "C# Sum Consumer" = "$csharpSumConsumer\bin\Debug\net9.0\SumConsumer.exe" +} + +foreach ($name in $paths.Keys) { + if (!(Test-Path $paths[$name])) { + throw "$name not found at $($paths[$name])" + } +} + +Write-Host "All executables found!" -ForegroundColor Green +Write-Host "" + +# Run unit test suites when running all scenarios +if ($Scenario -eq "all") { + Write-Host "=== Running Unit Test Suites ===" -ForegroundColor Cyan + Write-Host "" + + # Run C++ GTest suite + Write-Host "Running C++ GTest suite..." -ForegroundColor Yellow + $gtestExe = "src\x64\Debug\Interprocess.Native.Static.Tests.exe" + if (Test-Path $gtestExe) { + $gtestOutput = & $gtestExe --gtest_filter="CircularBufferTest.*:QueueTest.*" 2>&1 + $gtestExitCode = $LASTEXITCODE + + # Show summary + $gtestOutput | Select-String -Pattern "\[==========\]|\[ PASSED \]|\[ FAILED \]" | ForEach-Object { + if ($_ -match "FAILED") { + Write-Host $_ -ForegroundColor Red + } else { + Write-Host $_ -ForegroundColor Green + } + } + + if ($gtestExitCode -ne 0) { + Write-Host "C++ GTest suite FAILED!" -ForegroundColor Red + Write-Host "Full output:" -ForegroundColor Yellow + $gtestOutput | ForEach-Object { Write-Host $_ } + throw "C++ unit tests failed" + } + Write-Host "✅ C++ GTest suite passed" -ForegroundColor Green + } else { + Write-Host "⚠ C++ GTest suite not found at $gtestExe - skipping" -ForegroundColor Yellow + } + Write-Host "" + + # Run .NET tests + Write-Host "Running .NET test suite..." -ForegroundColor Yellow + Push-Location "src" + try { + $dotnetTestOutput = dotnet test Interprocess.Tests/Interprocess.Tests.csproj --no-build --verbosity minimal 2>&1 + + # Parse test results from output (ignore vcxproj warnings) + $passedLine = $dotnetTestOutput | Select-String "Passed!" + $failedCount = 0 + $testsPassed = $false + + if ($passedLine -match "Failed:\s+(\d+)") { + $failedCount = [int]$matches[1] + } + if ($passedLine -match "Passed!") { + $testsPassed = $true + } + + # Show relevant output (filter out vcxproj MSB4278 warnings) + $dotnetTestOutput | Where-Object { $_ -notmatch "MSB4278|VCTargetsPath" } | ForEach-Object { + if ($_ -match "Failed!.*Failed:\s+[1-9]") { + Write-Host $_ -ForegroundColor Red + } elseif ($_ -match "Passed!") { + Write-Host $_ -ForegroundColor Green + } elseif ($_ -match "\[SKIP\]|Skipped") { + Write-Host $_ -ForegroundColor Yellow + } elseif ($_ -match "^\s*$") { + # Skip empty lines + } else { + Write-Host $_ + } + } + + if ($failedCount -gt 0 -or !$testsPassed) { + Write-Host ".NET test suite FAILED! ($failedCount test(s) failed)" -ForegroundColor Red + throw ".NET unit tests failed" + } + + # Extract test counts + if ($passedLine -match "Passed:\s+(\d+)") { + $passedCount = $matches[1] + Write-Host "✅ .NET test suite passed ($passedCount tests)" -ForegroundColor Green + } else { + Write-Host "✅ .NET test suite passed" -ForegroundColor Green + } + } finally { + Pop-Location + } + Write-Host "" + Write-Host "=== Starting Integration Tests ===" -ForegroundColor Cyan + Write-Host "" +} + +function Stop-TestProcesses { + taskkill /F /IM Producer.exe 2>$null | Out-Null + taskkill /F /IM Consumer.exe 2>$null | Out-Null + taskkill /F /IM Publisher.exe 2>$null | Out-Null + taskkill /F /IM Subscriber.exe 2>$null | Out-Null + taskkill /F /IM RangeProducer.exe 2>$null | Out-Null + taskkill /F /IM SumConsumer.exe 2>$null | Out-Null + Start-Sleep -Seconds 1 +} + +function Test-CrossProcess { + param( + [string]$TestName, + [string]$ProducerExe, + [string]$ProducerWorkDir, + [string]$ConsumerExe, + [string]$ConsumerWorkDir, + [int]$Count + ) + + Write-Host "=== Testing: $TestName ===" -ForegroundColor Magenta + + # Create unique queue name for this test + $timestamp = [DateTimeOffset]::UtcNow.ToUnixTimeMilliseconds() + $queueName = "test-queue-$timestamp" + Write-Host "Using queue: $queueName" -ForegroundColor Cyan + + Stop-TestProcesses + + try { + # Start Consumer first + Write-Host "Starting Consumer..." -ForegroundColor Green + if ($ConsumerWorkDir) { + $consumerJob = Start-Job -ScriptBlock { + param($workDir, $exe, $count, $queue) + Set-Location $workDir + if ($exe.EndsWith(".exe") -and (Test-Path "./$($exe.Split('\')[-1])")) { + & "./$($exe.Split('\')[-1])" $count $queue 2>&1 + } else { + # Use dotnet run for C# projects + dotnet run --no-build -- $queue 2>&1 + } + } -ArgumentList (Resolve-Path $ConsumerWorkDir).Path, $ConsumerExe, $Count, $queueName + } else { + $consumerJob = Start-Job -ScriptBlock { + param($exe, $count, $queue) + & $exe $count $queue 2>&1 + } -ArgumentList (Resolve-Path $ConsumerExe).Path, $Count, $queueName + } + + # Wait for consumer to start + Start-Sleep -Seconds 3 + + # Start Producer + Write-Host "Starting Producer to send $Count messages..." -ForegroundColor Green + if ($ProducerWorkDir) { + $producerJob = Start-Job -ScriptBlock { + param($workDir, $exe, $count, $queue) + Set-Location $workDir + if ($exe.EndsWith(".exe") -and (Test-Path "./$($exe.Split('\')[-1])")) { + & "./$($exe.Split('\')[-1])" $count $queue 2>&1 + } else { + # Use dotnet run for C# projects + dotnet run --no-build -- $count $queue 2>&1 + } + } -ArgumentList (Resolve-Path $ProducerWorkDir).Path, $ProducerExe, $Count, $queueName + } else { + $producerJob = Start-Job -ScriptBlock { + param($exe, $count, $queue) + & $exe $count $queue 2>&1 + } -ArgumentList (Resolve-Path $ProducerExe).Path, $Count, $queueName + } + + # Wait for completion + Write-Host "Waiting for Producer completion..." -ForegroundColor Yellow + Wait-Job $producerJob -Timeout 30 | Out-Null + + Write-Host "Waiting for Consumer completion..." -ForegroundColor Yellow + Wait-Job $consumerJob -Timeout 10 | Out-Null + + # Get results + $producerOutput = Receive-Job $producerJob + $consumerOutput = Receive-Job $consumerJob + + Write-Host "`nAnalyzing results..." -ForegroundColor Cyan + + # Parse producer results + $producerSent = 0 + $producerMatch = $producerOutput | Select-String "Total messages sent: (\d+)" + if ($producerMatch) { + $producerSent = [int]$producerMatch.Matches[0].Groups[1].Value + } + + # Parse consumer results + $consumerReceived = 0 + $sequenceSuccess = $false + + $consumerMatch = $consumerOutput | Select-String "Total messages received: (\d+)" + if ($consumerMatch) { + $consumerReceived = [int]$consumerMatch.Matches[0].Groups[1].Value + } + + # Check for sequence validation (supports both C++ and C# formats) + $sequenceSuccess = $consumerOutput | Select-String "Perfect message integrity" + $sequenceError = $consumerOutput | Select-String "SEQUENCE ERROR|Sequence validation failed" + + # Determine result + $success = $false + $message = "" + + if ($producerSent -eq $Count -and $consumerReceived -eq $Count -and $sequenceSuccess) { + $success = $true + $message = "Perfect message integrity! All $Count messages sent and received with correct sequence (0-$([Math]::Min($Count-1, 99)))" + } elseif ($producerSent -ne $Count) { + $message = "Producer failed to send all messages. Sent: $producerSent, Expected: $Count" + } elseif ($consumerReceived -ne $Count) { + $message = "Consumer failed to receive all messages. Received: $consumerReceived, Expected: $Count" + } elseif ($sequenceError) { + $message = "Sequence validation failed. Messages received out of order or with incorrect values." + } else { + $message = "Unknown validation failure" + } + + # Display results + if ($success) { + Write-Host "🎉 SUCCESS: $message" -ForegroundColor Green + } else { + Write-Host "❌ FAILED: $message" -ForegroundColor Red + Write-Host "Producer Output:" -ForegroundColor Yellow + $producerOutput | ForEach-Object { Write-Host " $_" -ForegroundColor Gray } + Write-Host "Consumer Output:" -ForegroundColor Yellow + $consumerOutput | ForEach-Object { Write-Host " $_" -ForegroundColor Gray } + } + + return @{ + Success = $success + Message = $message + ProducerSent = $producerSent + ConsumerReceived = $consumerReceived + } + + } finally { + Get-Job | Remove-Job -Force + Stop-TestProcesses + } +} + +function Test-ConcurrentProducers { + param( + [int]$ProducerCount, + [int]$MessagesPerProducer + ) + + Write-Host "=== Testing: Multiple Concurrent C++ Producers → Single C++ Consumer ===" -ForegroundColor Magenta + Write-Host "This test validates that multiple producers can write concurrently without data corruption" -ForegroundColor Cyan + Write-Host "" + + # Test parameters + $timestamp = [DateTimeOffset]::UtcNow.ToUnixTimeMilliseconds() + $queueName = "test-concurrent-$timestamp" + $eventName = "ProducerStartEvent_$timestamp" + $totalMessages = $ProducerCount * $MessagesPerProducer + + # Calculate expected sum + # Producer i sends values from (i * MessagesPerProducer) to ((i+1) * MessagesPerProducer - 1) + # Sum of arithmetic sequence: sum = n * (first + last) / 2 + $expectedSum = 0 + for ($i = 0; $i -lt $ProducerCount; $i++) { + $start = $i * $MessagesPerProducer + $end = $start + $MessagesPerProducer - 1 + $rangeSum = $MessagesPerProducer * ($start + $end) / 2 + $expectedSum += $rangeSum + } + + Write-Host "Configuration:" -ForegroundColor Cyan + Write-Host " Producers: $ProducerCount" -ForegroundColor White + Write-Host " Messages per producer: $MessagesPerProducer" -ForegroundColor White + Write-Host " Total messages: $totalMessages" -ForegroundColor White + Write-Host " Expected sum: $expectedSum" -ForegroundColor White + Write-Host " Queue: $queueName" -ForegroundColor White + Write-Host " Event: $eventName" -ForegroundColor White + Write-Host "" + + Stop-TestProcesses + + try { + # Launch all producers FIRST (they will block waiting for the event) + Write-Host "Launching $ProducerCount producers (blocking on event)..." -ForegroundColor Green + $producerJobs = @() + + for ($i = 0; $i -lt $ProducerCount; $i++) { + $startValue = $i * $MessagesPerProducer + + $producerJob = Start-Job -ScriptBlock { + param($exe, $start, $count, $queue, $eventName) + & $exe $start $count $queue $eventName 2>&1 + } -ArgumentList (Resolve-Path $cppRangeProducer).Path, $startValue, $MessagesPerProducer, $queueName, $eventName + + $producerJobs += $producerJob + } + + Write-Host "All producers launched and blocking..." -ForegroundColor Yellow + + # Give producers time to start and reach blocking state + Start-Sleep -Seconds 2 + + # Start consumer (which will signal the event to release all producers) + Write-Host "Starting consumer (will signal event to start all producers)..." -ForegroundColor Green + $consumerJob = Start-Job -ScriptBlock { + param($exe, $count, $sum, $queue, $eventName) + & $exe $count $sum $queue 30 $eventName 2>&1 + } -ArgumentList (Resolve-Path $cppSumConsumer).Path, $totalMessages, $expectedSum, $queueName, $eventName + + Write-Host "🚀 Producers will start SIMULTANEOUSLY!" -ForegroundColor Green + Write-Host "" + + # Wait for all producers to complete + Write-Host "Waiting for producers..." -ForegroundColor Yellow + $producerJobs | Wait-Job -Timeout 30 | Out-Null + + # Check how many producers completed successfully + $completedCount = ($producerJobs | Where-Object { $_.State -eq 'Completed' }).Count + $runningCount = ($producerJobs | Where-Object { $_.State -eq 'Running' }).Count + Write-Host "Producer status: $completedCount completed, $runningCount still running (out of $ProducerCount)" -ForegroundColor Cyan + + # Give consumer time to process remaining messages + Start-Sleep -Seconds 2 + + # Wait for consumer + Write-Host "Waiting for consumer..." -ForegroundColor Yellow + Wait-Job $consumerJob -Timeout 10 | Out-Null + + # Get consumer output + $consumerOutput = Receive-Job $consumerJob + + # Parse results + $success = $consumerOutput | Select-String "SUCCESS: All messages received with correct sum" + $failed = $consumerOutput | Select-String "FAILED:" + + $message = "" + $testSuccess = $false + + if ($success) { + $testSuccess = $true + $message = "All $totalMessages messages from $ProducerCount concurrent producers received with perfect integrity" + } elseif ($failed) { + $failLine = ($failed | Select-Object -First 1).Line + $message = $failLine + } else { + # Try to extract message count from output + Write-Host $consumerOutput + $lastMessageLine = $consumerOutput | Select-String "message (\d+)/$totalMessages" | Select-Object -Last 1 + if ($lastMessageLine -and $lastMessageLine.Matches.Groups.Count -gt 1) { + $receivedCount = $lastMessageLine.Matches.Groups[1].Value + $message = "TIMEOUT: Consumer received only $receivedCount out of $totalMessages messages before timing out" + } else { + $message = "TIMEOUT: Consumer did not complete (could not parse message count)" + } + } + + return @{ + Success = $testSuccess + Message = $message + } + + } finally { + Get-Job | Remove-Job -Force + Stop-TestProcesses + } +} + +function Test-ConcurrentProducersCSharpConsumer { + param( + [int]$ProducerCount, + [int]$MessagesPerProducer + ) + + Write-Host "=== Testing: Multiple Concurrent C++ Producers → Single C# Consumer ===" -ForegroundColor Magenta + Write-Host "This test validates that the C# library can consume from concurrent C++ producers" -ForegroundColor Cyan + Write-Host "" + + # Test parameters + $timestamp = [DateTimeOffset]::UtcNow.ToUnixTimeMilliseconds() + $queueName = "test-concurrent-csharp-$timestamp" + $eventName = "ProducerStartEvent_$timestamp" + $totalMessages = $ProducerCount * $MessagesPerProducer + + # Calculate expected sum + $expectedSum = 0 + for ($i = 0; $i -lt $ProducerCount; $i++) { + $start = $i * $MessagesPerProducer + $end = $start + $MessagesPerProducer - 1 + $rangeSum = $MessagesPerProducer * ($start + $end) / 2 + $expectedSum += $rangeSum + } + + Write-Host "Configuration:" -ForegroundColor Cyan + Write-Host " Producers: $ProducerCount (C++)" -ForegroundColor White + Write-Host " Consumer: C#" -ForegroundColor White + Write-Host " Messages per producer: $MessagesPerProducer" -ForegroundColor White + Write-Host " Total messages: $totalMessages" -ForegroundColor White + Write-Host " Expected sum: $expectedSum" -ForegroundColor White + Write-Host " Queue: $queueName" -ForegroundColor White + Write-Host " Event: $eventName" -ForegroundColor White + Write-Host "" + + Stop-TestProcesses + + try { + # Launch all C++ producers (they will block waiting for the event) + Write-Host "Launching $ProducerCount C++ producers (blocking on event)..." -ForegroundColor Green + $producerJobs = @() + + for ($i = 0; $i -lt $ProducerCount; $i++) { + $startValue = $i * $MessagesPerProducer + + $producerJob = Start-Job -ScriptBlock { + param($exe, $start, $count, $queue, $eventName) + & $exe $start $count $queue $eventName 2>&1 + } -ArgumentList (Resolve-Path $cppRangeProducer).Path, $startValue, $MessagesPerProducer, $queueName, $eventName + + $producerJobs += $producerJob + } + + Write-Host "All C++ producers launched and blocking..." -ForegroundColor Yellow + + # Give producers time to start and reach blocking state + Start-Sleep -Seconds 2 + + # Start C# consumer (which will signal the event to release all producers) + Write-Host "Starting C# consumer (will signal event to start all producers)..." -ForegroundColor Green + $csharpConsumerExe = Join-Path $csharpSumConsumer "bin\Debug\net9.0\SumConsumer.exe" + $consumerJob = Start-Job -ScriptBlock { + param($exe, $count, $sum, $queue, $eventName) + & $exe $count $sum $queue 30 $eventName 2>&1 + } -ArgumentList (Resolve-Path $csharpConsumerExe).Path, $totalMessages, $expectedSum, $queueName, $eventName + + Write-Host "🚀 Producers will start SIMULTANEOUSLY!" -ForegroundColor Green + Write-Host "" + + # Wait for all producers to complete + Write-Host "Waiting for C++ producers..." -ForegroundColor Yellow + $producerJobs | Wait-Job -Timeout 30 | Out-Null + + # Check how many producers completed successfully + $completedJobs = $producerJobs | Where-Object { $_.State -eq 'Completed' } + $completedCount = $completedJobs.Count + $runningCount = ($producerJobs | Where-Object { $_.State -eq 'Running' }).Count + $failedCount = ($producerJobs | Where-Object { $_.State -eq 'Failed' }).Count + + # Check exit codes for completed jobs + $successCount = 0 + $exitCodeFailures = @() + foreach ($job in $completedJobs) { + $exitCode = (Receive-Job $job -Keep -ErrorAction SilentlyContinue | Select-Object -Last 1).ExitCode + if ($null -eq $exitCode) { + # Try to get process exit code another way + $jobOutput = Receive-Job $job -Keep 2>&1 + if ($jobOutput -match "SUCCESS:") { + $successCount++ + } else { + $exitCodeFailures += "Job $($job.Id) may have failed (no clear success marker)" + } + } elseif ($exitCode -eq 0) { + $successCount++ + } else { + $exitCodeFailures += "Job $($job.Id) exited with code $exitCode" + } + } + + Write-Host "Producer status: $successCount succeeded (exit 0), $($completedCount - $successCount) completed with errors, $runningCount still running, $failedCount failed (out of $ProducerCount)" -ForegroundColor Cyan + + if ($exitCodeFailures.Count -gt 0) { + Write-Host "Exit code failures:" -ForegroundColor Red + $exitCodeFailures | Select-Object -First 10 | ForEach-Object { Write-Host " $_" -ForegroundColor Red } + + # Show full output from failed jobs + Write-Host "Full output from failed producers:" -ForegroundColor Red + foreach ($failureMsg in ($exitCodeFailures | Select-Object -First 3)) { + $jobId = [regex]::Match($failureMsg, "Job (\d+)").Groups[1].Value + $job = $producerJobs | Where-Object { $_.Id -eq [int]$jobId } | Select-Object -First 1 + if ($job) { + Write-Host "--- Job $jobId Full Output ---" -ForegroundColor Yellow + Receive-Job $job -Keep 2>&1 | ForEach-Object { Write-Host " $_" } + } + } + } + + # Get any error output from producers + $producerErrors = $producerJobs | Receive-Job 2>&1 | Select-String -Pattern "FATAL|error.*failed|exception" -CaseSensitive:$false + if ($producerErrors) { + Write-Host "Producer errors detected:" -ForegroundColor Red + $producerErrors | Select-Object -First 10 | ForEach-Object { Write-Host " $_" -ForegroundColor Red } + } + + # Show sample output from first few producers for debugging + if ($VerbosePreference -eq 'Continue') { + Write-Host "Sample output from first 3 producers:" -ForegroundColor Yellow + $producerJobs | Select-Object -First 3 | ForEach-Object { + Write-Host "--- Producer Job $($_.Id) ---" -ForegroundColor DarkGray + Receive-Job $_ -Keep 2>&1 | Select-Object -Last 15 | ForEach-Object { Write-Host " $_" } + } + } + + # Give consumer time to process remaining messages + Start-Sleep -Seconds 2 + + # Wait for C# consumer + Write-Host "Waiting for C# consumer..." -ForegroundColor Yellow + Wait-Job $consumerJob -Timeout 10 | Out-Null + + # Get consumer output + $consumerOutput = Receive-Job $consumerJob + + # Parse results first + $success = $consumerOutput | Select-String "SUCCESS: All messages received with correct sum" + $failed = $consumerOutput | Select-String "FAILED:" + + # Show filtered output (skip per-message output, show summary only) + Write-Host "Consumer output (summary):" -ForegroundColor Yellow + $consumerOutput | Where-Object { + $_ -notmatch "Received value:" -and + $_ -notmatch "message \d+/\d+" -and + $_ -match "\S" # Not empty + } | ForEach-Object { Write-Host $_ } + Write-Host "" + + $message = "" + $testSuccess = $false + + if ($success) { + $testSuccess = $true + $message = "C# library successfully consumed all $totalMessages messages from $ProducerCount concurrent C++ producers with perfect integrity" + } elseif ($failed) { + $failLine = ($failed | Select-Object -First 1).Line + $message = $failLine + } else { + Write-Host "⚠ Could not find SUCCESS or FAILED in output. Showing last 10 lines:" -ForegroundColor Yellow + $consumerOutput | Select-Object -Last 10 | ForEach-Object { Write-Host $_ } + + # Try to extract message count from output + $lastMessageLine = $consumerOutput | Select-String "message (\d+)/$totalMessages" | Select-Object -Last 1 + if ($lastMessageLine -and $lastMessageLine.Matches.Groups.Count -gt 1) { + $receivedCount = $lastMessageLine.Matches.Groups[1].Value + $message = "TIMEOUT: Consumer received only $receivedCount out of $totalMessages messages before timing out" + } else { + $message = "TIMEOUT: Consumer did not complete (could not parse message count)" + } + } + + return @{ + Success = $testSuccess + Message = $message + } + + } finally { + Get-Job | Remove-Job -Force + Stop-TestProcesses + } +} + +# Run tests +$results = @() + +if ($Scenario -eq "all" -or $Scenario -eq "cpp-cpp") { + $result = Test-CrossProcess "C++ Producer → C++ Consumer" $cppProducer $null $cppConsumer $null $MessageCount + $results += [PSCustomObject]@{ Name = "C++ → C++"; Success = $result.Success; Message = $result.Message } +} + +if ($Scenario -eq "all" -or $Scenario -eq "cpp-csharp") { + $result = Test-CrossProcess "C++ Producer → C# Subscriber" $cppProducer $null "Subscriber.exe" $csharpSubscriber $MessageCount + $results += [PSCustomObject]@{ Name = "C++ → C#"; Success = $result.Success; Message = $result.Message } +} + +if ($Scenario -eq "all" -or $Scenario -eq "csharp-cpp") { + $result = Test-CrossProcess "C# Publisher → C++ Consumer" "Publisher.exe" $csharpPublisher $cppConsumer $null $MessageCount + $results += [PSCustomObject]@{ Name = "C# → C++"; Success = $result.Success; Message = $result.Message } +} + +if ($Scenario -eq "all" -or $Scenario -eq "csharp-csharp") { + $result = Test-CrossProcess "C# Publisher → C# Subscriber" "Publisher.exe" $csharpPublisher "Subscriber.exe" $csharpSubscriber $MessageCount + $results += [PSCustomObject]@{ Name = "C# → C#"; Success = $result.Success; Message = $result.Message } +} + +if ($Scenario -eq "all" -or $Scenario -eq "concurrent") { + $result = Test-ConcurrentProducers $ConcurrentProducerCount $MessagesPerProducer + $results += [PSCustomObject]@{ Name = "Concurrent C++ → C++"; Success = $result.Success; Message = $result.Message } +} + +if ($Scenario -eq "all" -or $Scenario -eq "concurrent-csharp") { + $result = Test-ConcurrentProducersCSharpConsumer $ConcurrentProducerCount $MessagesPerProducer + $results += [PSCustomObject]@{ Name = "Concurrent C++ → C#"; Success = $result.Success; Message = $result.Message } +} + +# Final summary +Write-Host "`n" + "="*80 -ForegroundColor Cyan +Write-Host "FINAL SUMMARY" -ForegroundColor Cyan +Write-Host "="*80 -ForegroundColor Cyan + +$passCount = 0 +foreach ($result in $results) { + if ($result.Success) { + Write-Host "✅ $($result.Name): SUCCESS" -ForegroundColor Green + $passCount++ + } else { + Write-Host "❌ $($result.Name): FAILED - $($result.Message)" -ForegroundColor Red + } +} + +Write-Host "" +if ($passCount -eq $results.Count) { + Write-Host "🎉 ALL TESTS PASSED! ($passCount/$($results.Count))" -ForegroundColor Green +} else { + Write-Host "⚠ SOME TESTS FAILED ($passCount/$($results.Count) passed)" -ForegroundColor Red +} + +Write-Host "" +Write-Host "Test suite completed." -ForegroundColor Cyan \ No newline at end of file