diff --git a/.gitignore b/.gitignore
index 5bf25ccac6..6e2c068fa3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -100,4 +100,4 @@ src/scaffolding.config
*.sln.iml
# Visual Studio Code
-.vscode
+.vscode
\ No newline at end of file
diff --git a/src/Directory.Packages.props b/src/Directory.Packages.props
index f3c51d39b4..31e79eda52 100644
--- a/src/Directory.Packages.props
+++ b/src/Directory.Packages.props
@@ -17,6 +17,7 @@
+
@@ -78,6 +79,7 @@
+
diff --git a/src/ServiceControl.Audit.Persistence.MongoDB/BodyStorage/FileSystemBodyStorage.cs b/src/ServiceControl.Audit.Persistence.MongoDB/BodyStorage/FileSystemBodyStorage.cs
new file mode 100644
index 0000000000..4ca8571cd2
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.MongoDB/BodyStorage/FileSystemBodyStorage.cs
@@ -0,0 +1,26 @@
+namespace ServiceControl.Audit.Persistence.MongoDB.BodyStorage
+{
+ using System;
+ using System.IO;
+ using System.Threading;
+ using System.Threading.Tasks;
+ using Auditing.BodyStorage;
+
+ /// <summary>
+ /// Stores message bodies on the file system.
+ /// Useful when message bodies should not be stored in the database.
+ /// </summary>
+ class FileSystemBodyStorage : IBodyStorage
+ {
+ // TODO: Implement file system body storage
+ // - Store bodies as files in a configurable directory
+ // - Use bodyId as filename (with appropriate sanitization)
+ // - Handle expiration via file timestamps or separate cleanup process
+
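+ // A minimal sketch of what Store could do, kept as a comment since the storage
+ // root setting (storageRoot) and the sanitization policy are assumptions, not
+ // part of this change:
+ //
+ //   var safeName = Path.GetFileName(bodyId); // strip any path segments
+ //   var path = Path.Combine(storageRoot, safeName);
+ //   await using var file = File.Create(path);
+ //   await bodyStream.CopyToAsync(file, cancellationToken);
+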
+ public Task Store(string bodyId, string contentType, int bodySize, Stream bodyStream, CancellationToken cancellationToken)
+ => throw new NotImplementedException("File system body storage not yet implemented");
+
+ public Task<StreamResult> TryFetch(string bodyId, CancellationToken cancellationToken)
+ => throw new NotImplementedException("File system body storage not yet implemented");
+ }
+}
diff --git a/src/ServiceControl.Audit.Persistence.MongoDB/BodyStorage/MongoBodyStorage.cs b/src/ServiceControl.Audit.Persistence.MongoDB/BodyStorage/MongoBodyStorage.cs
new file mode 100644
index 0000000000..ea1e79c071
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.MongoDB/BodyStorage/MongoBodyStorage.cs
@@ -0,0 +1,62 @@
+namespace ServiceControl.Audit.Persistence.MongoDB.BodyStorage
+{
+ using System.IO;
+ using System.Threading;
+ using System.Threading.Tasks;
+ using Auditing.BodyStorage;
+ using Collections;
+ using Documents;
+ using global::MongoDB.Driver;
+
+ /// <summary>
+ /// Reads message bodies from the messageBodies collection.
+ /// Bodies are written asynchronously by BodyStorageWriter via a channel.
+ /// </summary>
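+ /// <example>
+ /// Illustrative read path; <c>destination</c> stands in for any output stream and
+ /// is not part of this change:
+ /// <code>
+ /// var result = await bodyStorage.TryFetch(messageId, cancellationToken);
+ /// if (result.HasResult)
+ /// {
+ ///     await result.Stream.CopyToAsync(destination, cancellationToken);
+ /// }
+ /// </code>
+ /// </example>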
+ class MongoBodyStorage(IMongoClientProvider clientProvider) : IBodyStorage
+ {
+ public Task Store(string bodyId, string contentType, int bodySize, Stream bodyStream, CancellationToken cancellationToken)
+ {
+ // Bodies are written by BodyStorageWriter, not through IBodyStorage.Store()
+ return Task.CompletedTask;
+ }
+
+ public async Task<StreamResult> TryFetch(string bodyId, CancellationToken cancellationToken)
+ {
+ var collection = clientProvider.Database
+ .GetCollection<MessageBodyDocument>(CollectionNames.MessageBodies);
+
+ var filter = Builders<MessageBodyDocument>.Filter.Eq(d => d.Id, bodyId);
+ var document = await collection.Find(filter)
+ .FirstOrDefaultAsync(cancellationToken)
+ .ConfigureAwait(false);
+
+ if (document == null)
+ {
+ return new StreamResult { HasResult = false };
+ }
+
+ byte[] bodyBytes;
+ if (document.TextBody != null)
+ {
+ bodyBytes = System.Text.Encoding.UTF8.GetBytes(document.TextBody);
+ }
+ else if (document.BinaryBody != null)
+ {
+ bodyBytes = document.BinaryBody;
+ }
+ else
+ {
+ return new StreamResult { HasResult = false };
+ }
+
+ return new StreamResult
+ {
+ HasResult = true,
+ Stream = new MemoryStream(bodyBytes),
+ ContentType = document.ContentType ?? "text/plain",
+ BodySize = document.BodySize,
+ Etag = bodyId
+ };
+ }
+ }
+}
diff --git a/src/ServiceControl.Audit.Persistence.MongoDB/BodyStorage/NullBodyStorage.cs b/src/ServiceControl.Audit.Persistence.MongoDB/BodyStorage/NullBodyStorage.cs
new file mode 100644
index 0000000000..30068edfc4
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.MongoDB/BodyStorage/NullBodyStorage.cs
@@ -0,0 +1,19 @@
+namespace ServiceControl.Audit.Persistence.MongoDB.BodyStorage
+{
+ using System.IO;
+ using System.Threading;
+ using System.Threading.Tasks;
+ using Auditing.BodyStorage;
+
+ /// <summary>
+ /// A no-op body storage implementation used when body storage is disabled.
+ /// </summary>
+ class NullBodyStorage : IBodyStorage
+ {
+ public Task Store(string bodyId, string contentType, int bodySize, Stream bodyStream, CancellationToken cancellationToken)
+ => Task.CompletedTask;
+
+ public Task<StreamResult> TryFetch(string bodyId, CancellationToken cancellationToken)
+ => Task.FromResult(new StreamResult { HasResult = false });
+ }
+}
diff --git a/src/ServiceControl.Audit.Persistence.MongoDB/BodyStorageType.cs b/src/ServiceControl.Audit.Persistence.MongoDB/BodyStorageType.cs
new file mode 100644
index 0000000000..f2f62a68ba
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.MongoDB/BodyStorageType.cs
@@ -0,0 +1,23 @@
+namespace ServiceControl.Audit.Persistence.MongoDB
+{
+ /// <summary>
+ /// Specifies where message bodies should be stored.
+ /// </summary>
+ public enum BodyStorageType
+ {
+ /// <summary>
+ /// Message bodies are not stored.
+ /// </summary>
+ None,
+
+ /// <summary>
+ /// Message bodies are stored in the MongoDB database.
+ /// </summary>
+ Database,
+
+ /// <summary>
+ /// Message bodies are stored on the file system.
+ /// </summary>
+ FileSystem
+ }
+}
diff --git a/src/ServiceControl.Audit.Persistence.MongoDB/Collections/CollectionNames.cs b/src/ServiceControl.Audit.Persistence.MongoDB/Collections/CollectionNames.cs
new file mode 100644
index 0000000000..0d5db3c52a
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.MongoDB/Collections/CollectionNames.cs
@@ -0,0 +1,14 @@
+namespace ServiceControl.Audit.Persistence.MongoDB.Collections
+{
+ /// <summary>
+ /// Constants for MongoDB collection names.
+ /// </summary>
+ static class CollectionNames
+ {
+ public const string ProcessedMessages = "processedMessages";
+ public const string KnownEndpoints = "knownEndpoints";
+ public const string SagaSnapshots = "sagaSnapshots";
+ public const string FailedAuditImports = "failedAuditImports";
+ public const string MessageBodies = "messageBodies";
+ }
+}
diff --git a/src/ServiceControl.Audit.Persistence.MongoDB/CustomChecks/CheckMongoDbCachePressure.cs b/src/ServiceControl.Audit.Persistence.MongoDB/CustomChecks/CheckMongoDbCachePressure.cs
new file mode 100644
index 0000000000..a120919f23
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.MongoDB/CustomChecks/CheckMongoDbCachePressure.cs
@@ -0,0 +1,139 @@
+namespace ServiceControl.Audit.Persistence.MongoDB.CustomChecks
+{
+ using System;
+ using System.Diagnostics;
+ using System.Threading;
+ using System.Threading.Tasks;
+ using global::MongoDB.Bson;
+
+ using Microsoft.Extensions.Logging;
+ using NServiceBus.CustomChecks;
+
+ class CheckMongoDbCachePressure(
+ IMongoClientProvider clientProvider,
+ MinimumRequiredStorageState stateHolder,
+ ILogger<CheckMongoDbCachePressure> logger)
+ : CustomCheck("MongoDB Storage Pressure", "ServiceControl.Audit Health", TimeSpan.FromSeconds(5))
+ {
+ public override async Task<CheckResult> PerformCheck(CancellationToken cancellationToken = default)
+ {
+ try
+ {
+ if (clientProvider.ProductCapabilities.SupportsWiredTigerCacheMetrics)
+ {
+ return await CheckWiredTigerCachePressure(cancellationToken).ConfigureAwait(false);
+ }
+
+ return await CheckWriteLatency(cancellationToken).ConfigureAwait(false);
+ }
+ catch (Exception ex)
+ {
+ logger.LogError(ex, "Failed to check MongoDB storage pressure");
+
+ // On failure, allow ingestion to continue — don't block on monitoring errors
+ stateHolder.CanIngestMore = true;
+ return CheckResult.Failed($"Unable to check MongoDB storage pressure: {ex.Message}");
+ }
+ }
+
+ async Task<CheckResult> CheckWiredTigerCachePressure(CancellationToken cancellationToken)
+ {
+ var serverStatus = await clientProvider.Database
+ .RunCommandAsync<BsonDocument>(new BsonDocument("serverStatus", 1), cancellationToken: cancellationToken)
+ .ConfigureAwait(false);
+
+ var cache = serverStatus["wiredTiger"].AsBsonDocument["cache"].AsBsonDocument;
+
+ var dirtyBytes = cache["tracked dirty bytes in the cache"].ToDouble();
+ var totalBytes = cache["bytes currently in the cache"].ToDouble();
+ var maxBytes = cache["maximum bytes configured"].ToDouble();
+
+ var dirtyPercentage = maxBytes > 0 ? dirtyBytes / maxBytes * 100 : 0;
+ var usedPercentage = maxBytes > 0 ? totalBytes / maxBytes * 100 : 0;
+
+ logger.LogDebug(
+ "MongoDB WiredTiger cache - Dirty: {DirtyPercentage:F1}%, Used: {UsedPercentage:F1}%, Dirty bytes: {DirtyBytes:N0}, Total bytes: {TotalBytes:N0}, Max bytes: {MaxBytes:N0}",
+ dirtyPercentage, usedPercentage, dirtyBytes, totalBytes, maxBytes);
+
+ if (dirtyPercentage >= DirtyThresholdPercentage)
+ {
+ logger.LogWarning(
+ "Audit message ingestion paused. MongoDB WiredTiger dirty cache at {DirtyPercentage:F1}% (threshold: {Threshold}%). This indicates write pressure is exceeding the storage engine's ability to flush to disk",
+ dirtyPercentage, DirtyThresholdPercentage);
+
+ stateHolder.CanIngestMore = false;
+ return CheckResult.Failed(
+ $"MongoDB WiredTiger dirty cache at {dirtyPercentage:F1}% (threshold: {DirtyThresholdPercentage}%). Ingestion paused to allow the storage engine to recover.");
+ }
+
+ stateHolder.CanIngestMore = true;
+ return CheckResult.Pass;
+ }
+
+ async Task<CheckResult> CheckWriteLatency(CancellationToken cancellationToken)
+ {
+ var sw = Stopwatch.StartNew();
+ _ = await clientProvider.Database
+ .RunCommandAsync<BsonDocument>(new BsonDocument("ping", 1), cancellationToken: cancellationToken)
+ .ConfigureAwait(false);
+ sw.Stop();
+
+ var latencyMs = sw.Elapsed.TotalMilliseconds;
+ RecordLatency(latencyMs);
+
+ var sampleCount = Math.Min(latencyIndex, LatencyWindowSize);
+ var avgLatency = GetAverageLatency();
+
+ logger.LogDebug(
+ "MongoDB ping latency: {LatencyMs:F0}ms, Rolling average: {AvgLatency:F0}ms (samples: {SampleCount}/{WindowSize})",
+ latencyMs, avgLatency, sampleCount, LatencyWindowSize);
+
+ if (sampleCount >= MinSamplesBeforeThrottling && avgLatency >= LatencyThresholdMs)
+ {
+ logger.LogWarning(
+ "Audit message ingestion paused. MongoDB average response latency at {AvgLatency:F0}ms (threshold: {Threshold}ms). This indicates the database is under pressure",
+ avgLatency, LatencyThresholdMs);
+
+ stateHolder.CanIngestMore = false;
+ return CheckResult.Failed(
+ $"MongoDB average response latency at {avgLatency:F0}ms (threshold: {LatencyThresholdMs}ms). Ingestion paused to allow the database to recover.");
+ }
+
+ stateHolder.CanIngestMore = true;
+ return CheckResult.Pass;
+ }
+
+ void RecordLatency(double latencyMs)
+ {
+ latencyWindow[latencyIndex % LatencyWindowSize] = latencyMs;
+ latencyIndex++;
+ }
+
+ double GetAverageLatency()
+ {
+ var count = Math.Min(latencyIndex, LatencyWindowSize);
+ if (count == 0)
+ {
+ return 0;
+ }
+
+ double sum = 0;
+ for (var i = 0; i < count; i++)
+ {
+ sum += latencyWindow[i];
+ }
+
+ return sum / count;
+ }
+
+ // WiredTiger thresholds
+ const double DirtyThresholdPercentage = 15;
+
+ // Latency thresholds
+ const int LatencyWindowSize = 6; // 30 seconds of history at 5-second intervals
+ const int MinSamplesBeforeThrottling = 3; // Need at least 15 seconds of data before throttling
+ const double LatencyThresholdMs = 500;
+ readonly double[] latencyWindow = new double[LatencyWindowSize];
+ int latencyIndex;
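+
+ // Worked example (illustrative numbers): pings of 450, 520, 610, 480, 530 and 500 ms
+ // fill the window with an average of 515 ms, which crosses LatencyThresholdMs (500)
+ // and pauses ingestion until the rolling average drops back under the threshold.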
+ }
+}
diff --git a/src/ServiceControl.Audit.Persistence.MongoDB/Documents/FailedAuditImportDocument.cs b/src/ServiceControl.Audit.Persistence.MongoDB/Documents/FailedAuditImportDocument.cs
new file mode 100644
index 0000000000..349ee28960
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.MongoDB/Documents/FailedAuditImportDocument.cs
@@ -0,0 +1,24 @@
+namespace ServiceControl.Audit.Persistence.MongoDB.Documents
+{
+ using System.Collections.Generic;
+ using global::MongoDB.Bson;
+ using global::MongoDB.Bson.Serialization.Attributes;
+
+ class FailedAuditImportDocument
+ {
+ [BsonId]
+ public ObjectId Id { get; set; }
+
+ [BsonElement("messageId")]
+ public string MessageId { get; set; }
+
+ [BsonElement("headers")]
+ public Dictionary<string, string> Headers { get; set; }
+
+ [BsonElement("body")]
+ public byte[] Body { get; set; }
+
+ [BsonElement("exceptionInfo")]
+ public string ExceptionInfo { get; set; }
+ }
+}
diff --git a/src/ServiceControl.Audit.Persistence.MongoDB/Documents/KnownEndpointDocument.cs b/src/ServiceControl.Audit.Persistence.MongoDB/Documents/KnownEndpointDocument.cs
new file mode 100644
index 0000000000..ec9499d36c
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.MongoDB/Documents/KnownEndpointDocument.cs
@@ -0,0 +1,27 @@
+namespace ServiceControl.Audit.Persistence.MongoDB.Documents
+{
+ using System;
+ using global::MongoDB.Bson.Serialization.Attributes;
+
+ class KnownEndpointDocument
+ {
+ [BsonId]
+ public string Id { get; set; }
+
+ [BsonElement("name")]
+ public string Name { get; set; }
+
+ [BsonElement("hostId")]
+ [BsonGuidRepresentation(global::MongoDB.Bson.GuidRepresentation.Standard)]
+ public Guid HostId { get; set; }
+
+ [BsonElement("host")]
+ public string Host { get; set; }
+
+ [BsonElement("lastSeen")]
+ public DateTime LastSeen { get; set; }
+
+ [BsonElement("expiresAt")]
+ public DateTime ExpiresAt { get; set; }
+ }
+}
diff --git a/src/ServiceControl.Audit.Persistence.MongoDB/Documents/MessageBodyDocument.cs b/src/ServiceControl.Audit.Persistence.MongoDB/Documents/MessageBodyDocument.cs
new file mode 100644
index 0000000000..0cd4721bef
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.MongoDB/Documents/MessageBodyDocument.cs
@@ -0,0 +1,52 @@
+namespace ServiceControl.Audit.Persistence.MongoDB.Documents
+{
+ using System;
+ using global::MongoDB.Bson.Serialization.Attributes;
+
+ class MessageBodyDocument
+ {
+ [BsonId]
+ public string Id { get; set; }
+
+ [BsonElement("contentType")]
+ public string ContentType { get; set; }
+
+ [BsonElement("bodySize")]
+ public int BodySize { get; set; }
+
+ /// <summary>
+ /// Text body content for text-based content types (JSON, XML, plain text).
+ /// Stored as string for full-text search support.
+ /// </summary>
+ [BsonElement("textBody")]
+ [BsonIgnoreIfNull]
+ public string TextBody { get; set; }
+
+ /// <summary>
+ /// Binary body content for non-text content types (protobuf, images, etc.).
+ /// Stored as byte[] for efficient storage.
+ /// </summary>
+ [BsonElement("binaryBody")]
+ [BsonIgnoreIfNull]
+ public byte[] BinaryBody { get; set; }
+
+ [BsonElement("expiresAt")]
+ public DateTime ExpiresAt { get; set; }
+
+ /// <summary>
+ /// Determines if the content type represents text-based content that can be searched.
+ /// </summary>
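+ /// <example>
+ /// A few illustrative inputs and the result this check produces:
+ /// <code>
+ /// IsTextContentType("application/json")          // true ("json" fragment)
+ /// IsTextContentType("text/xml; charset=utf-8")   // true ("text/" prefix)
+ /// IsTextContentType("application/octet-stream")  // false
+ /// </code>
+ /// </example>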
+ public static bool IsTextContentType(string contentType)
+ {
+ if (string.IsNullOrEmpty(contentType))
+ {
+ return false;
+ }
+
+ // TODO: Better way to determine text-based content types?
+ return contentType.StartsWith("text/", StringComparison.OrdinalIgnoreCase) ||
+ contentType.Contains("json", StringComparison.OrdinalIgnoreCase) ||
+ contentType.Contains("xml", StringComparison.OrdinalIgnoreCase);
+ }
+ }
+}
diff --git a/src/ServiceControl.Audit.Persistence.MongoDB/Documents/ProcessedMessageDocument.cs b/src/ServiceControl.Audit.Persistence.MongoDB/Documents/ProcessedMessageDocument.cs
new file mode 100644
index 0000000000..6d9b15ae75
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.MongoDB/Documents/ProcessedMessageDocument.cs
@@ -0,0 +1,28 @@
+namespace ServiceControl.Audit.Persistence.MongoDB.Documents
+{
+ using System;
+ using System.Collections.Generic;
+ using global::MongoDB.Bson;
+ using global::MongoDB.Bson.Serialization.Attributes;
+
+ class ProcessedMessageDocument
+ {
+ [BsonId]
+ public string Id { get; set; }
+
+ [BsonElement("uniqueMessageId")]
+ public string UniqueMessageId { get; set; }
+
+ [BsonElement("messageMetadata")]
+ public BsonDocument MessageMetadata { get; set; }
+
+ [BsonElement("headers")]
+ public Dictionary<string, string> Headers { get; set; }
+
+ [BsonElement("processedAt")]
+ public DateTime ProcessedAt { get; set; }
+
+ [BsonElement("expiresAt")]
+ public DateTime ExpiresAt { get; set; }
+ }
+}
diff --git a/src/ServiceControl.Audit.Persistence.MongoDB/Documents/SagaSnapshotDocument.cs b/src/ServiceControl.Audit.Persistence.MongoDB/Documents/SagaSnapshotDocument.cs
new file mode 100644
index 0000000000..9700497c1a
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.MongoDB/Documents/SagaSnapshotDocument.cs
@@ -0,0 +1,98 @@
+namespace ServiceControl.Audit.Persistence.MongoDB.Documents
+{
+ using System;
+ using System.Collections.Generic;
+ using global::MongoDB.Bson.Serialization.Attributes;
+ using ServiceControl.SagaAudit;
+
+ class SagaSnapshotDocument
+ {
+ [BsonId]
+ public string Id { get; set; }
+
+ [BsonElement("sagaId")]
+ [BsonGuidRepresentation(global::MongoDB.Bson.GuidRepresentation.Standard)]
+ public Guid SagaId { get; set; }
+
+ [BsonElement("sagaType")]
+ public string SagaType { get; set; }
+
+ [BsonElement("startTime")]
+ public DateTime StartTime { get; set; }
+
+ [BsonElement("finishTime")]
+ public DateTime FinishTime { get; set; }
+
+ [BsonElement("status")]
+ public SagaStateChangeStatus Status { get; set; }
+
+ [BsonElement("stateAfterChange")]
+ public string StateAfterChange { get; set; }
+
+ [BsonElement("initiatingMessage")]
+ [BsonIgnoreIfNull]
+ public InitiatingMessageDocument InitiatingMessage { get; set; }
+
+ [BsonElement("outgoingMessages")]
+ public List<ResultingMessageDocument> OutgoingMessages { get; set; }
+
+ [BsonElement("endpoint")]
+ public string Endpoint { get; set; }
+
+ [BsonElement("processedAt")]
+ public DateTime ProcessedAt { get; set; }
+
+ [BsonElement("expiresAt")]
+ public DateTime ExpiresAt { get; set; }
+ }
+
+ class InitiatingMessageDocument
+ {
+ [BsonElement("messageId")]
+ public string MessageId { get; set; }
+
+ [BsonElement("messageType")]
+ public string MessageType { get; set; }
+
+ [BsonElement("isSagaTimeoutMessage")]
+ public bool IsSagaTimeoutMessage { get; set; }
+
+ [BsonElement("originatingMachine")]
+ public string OriginatingMachine { get; set; }
+
+ [BsonElement("originatingEndpoint")]
+ public string OriginatingEndpoint { get; set; }
+
+ [BsonElement("timeSent")]
+ public DateTime TimeSent { get; set; }
+
+ [BsonElement("intent")]
+ public string Intent { get; set; }
+ }
+
+ class ResultingMessageDocument
+ {
+ [BsonElement("messageId")]
+ public string MessageId { get; set; }
+
+ [BsonElement("messageType")]
+ public string MessageType { get; set; }
+
+ [BsonElement("destination")]
+ public string Destination { get; set; }
+
+ [BsonElement("timeSent")]
+ public DateTime TimeSent { get; set; }
+
+ [BsonElement("intent")]
+ public string Intent { get; set; }
+
+ [BsonElement("deliveryDelay")]
+ [BsonIgnoreIfNull]
+ public string DeliveryDelay { get; set; }
+
+ [BsonElement("deliverAt")]
+ [BsonIgnoreIfNull]
+ public DateTime? DeliverAt { get; set; }
+ }
+}
diff --git a/src/ServiceControl.Audit.Persistence.MongoDB/IMongoClientProvider.cs b/src/ServiceControl.Audit.Persistence.MongoDB/IMongoClientProvider.cs
new file mode 100644
index 0000000000..57b55c6a28
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.MongoDB/IMongoClientProvider.cs
@@ -0,0 +1,26 @@
+namespace ServiceControl.Audit.Persistence.MongoDB
+{
+ using global::MongoDB.Driver;
+ using ProductCapabilities;
+
+ /// <summary>
+ /// Provides access to the MongoDB client and database.
+ /// </summary>
+ public interface IMongoClientProvider
+ {
+ /// <summary>
+ /// Gets the MongoDB client instance.
+ /// </summary>
+ IMongoClient Client { get; }
+
+ /// <summary>
+ /// Gets the configured database.
+ /// </summary>
+ IMongoDatabase Database { get; }
+
+ /// <summary>
+ /// Gets the detected product capabilities for the connected MongoDB-compatible database.
+ /// </summary>
+ IMongoProductCapabilities ProductCapabilities { get; }
+ }
+}
diff --git a/src/ServiceControl.Audit.Persistence.MongoDB/IMongoPersistenceLifecycle.cs b/src/ServiceControl.Audit.Persistence.MongoDB/IMongoPersistenceLifecycle.cs
new file mode 100644
index 0000000000..6a26e8869f
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.MongoDB/IMongoPersistenceLifecycle.cs
@@ -0,0 +1,21 @@
+namespace ServiceControl.Audit.Persistence.MongoDB
+{
+ using System.Threading;
+ using System.Threading.Tasks;
+
+ /// <summary>
+ /// Manages the lifecycle of the MongoDB persistence layer.
+ /// </summary>
+ interface IMongoPersistenceLifecycle
+ {
+ /// <summary>
+ /// Initializes the MongoDB client and verifies connectivity.
+ /// </summary>
+ Task Initialize(CancellationToken cancellationToken = default);
+
+ /// <summary>
+ /// Stops the persistence layer and releases resources.
+ /// </summary>
+ Task Stop(CancellationToken cancellationToken = default);
+ }
+}
diff --git a/src/ServiceControl.Audit.Persistence.MongoDB/Indexes/IndexDefinitions.cs b/src/ServiceControl.Audit.Persistence.MongoDB/Indexes/IndexDefinitions.cs
new file mode 100644
index 0000000000..a50bf4cd83
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.MongoDB/Indexes/IndexDefinitions.cs
@@ -0,0 +1,109 @@
+namespace ServiceControl.Audit.Persistence.MongoDB.Indexes
+{
+ using System;
+ using System.Collections.Generic;
+ using Documents;
+ using global::MongoDB.Driver;
+
+ static class IndexDefinitions
+ {
+ public static CreateIndexModel<ProcessedMessageDocument>[] GetProcessedMessageIndexes()
+ {
+ // Text index covers metadata fields only. Body text search is handled
+ // by the separate messageBodies collection to avoid write path overhead.
+ var textIndexKeys = Builders<ProcessedMessageDocument>.IndexKeys
+ .Text("messageMetadata.MessageId")
+ .Text("messageMetadata.MessageType")
+ .Text("messageMetadata.ConversationId")
+ .Text("headers");
+
+ return
+ [
+ // Primary sort index for default message listing
+ new(
+ Builders<ProcessedMessageDocument>.IndexKeys.Descending(x => x.ProcessedAt),
+ new CreateIndexOptions { Name = "processedAt_desc" }),
+
+ // Alternative sort by time sent
+ new(
+ Builders.IndexKeys.Descending("messageMetadata.TimeSent"),
+ new CreateIndexOptions { Name = "timeSent_desc" }),
+
+ // Compound index for filtering by endpoint with processedAt sort
+ new(
+ Builders<ProcessedMessageDocument>.IndexKeys
+ .Ascending("messageMetadata.ReceivingEndpoint.Name")
+ .Descending(x => x.ProcessedAt),
+ new CreateIndexOptions { Name = "endpoint_processedAt" }),
+
+ // Conversation queries (sparse since not all messages have conversations)
+ new(
+ Builders.IndexKeys.Ascending("messageMetadata.ConversationId"),
+ new CreateIndexOptions { Name = "conversationId", Sparse = true }),
+
+ // TTL index for automatic document expiration
+ new(
+ Builders<ProcessedMessageDocument>.IndexKeys.Ascending(x => x.ExpiresAt),
+ new CreateIndexOptions { Name = "ttl_expiry", ExpireAfter = TimeSpan.Zero }),
+
+ // Compound text index for full-text search
+ // Indexes metadata fields directly (no duplication) plus header values (and optionally body content)
+ new(textIndexKeys, new CreateIndexOptions { Name = "text_search" })
+ ];
+ }
+
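+ // Note: ExpireAfter = TimeSpan.Zero in the TTL indexes above tells MongoDB's TTL
+ // monitor to delete each document as soon as its expiresAt value passes, so the
+ // retention period is baked into every document at write time.
+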
+ public static CreateIndexModel<SagaSnapshotDocument>[] SagaSnapshots =>
+ [
+ // SagaHistory aggregation queries
+ new(
+ Builders<SagaSnapshotDocument>.IndexKeys.Ascending(x => x.SagaId),
+ new CreateIndexOptions { Name = "sagaId" }),
+
+ // TTL index for automatic document expiration
+ new(
+ Builders<SagaSnapshotDocument>.IndexKeys.Ascending(x => x.ExpiresAt),
+ new CreateIndexOptions { Name = "ttl_expiry", ExpireAfter = TimeSpan.Zero })
+ ];
+
+ public static CreateIndexModel<KnownEndpointDocument>[] KnownEndpoints =>
+ [
+ // Compound index for endpoint lookup (also serves as unique constraint via _id)
+ new(
+ Builders<KnownEndpointDocument>.IndexKeys
+ .Ascending(x => x.Name)
+ .Ascending(x => x.HostId),
+ new CreateIndexOptions { Name = "name_hostId" }),
+
+ // TTL index for automatic document expiration
+ new(
+ Builders<KnownEndpointDocument>.IndexKeys.Ascending(x => x.ExpiresAt),
+ new CreateIndexOptions { Name = "ttl_expiry", ExpireAfter = TimeSpan.Zero })
+ ];
+
+ public static CreateIndexModel<MessageBodyDocument>[] GetMessageBodyIndexes(bool enableFullTextSearch)
+ {
+ var indexes = new List<CreateIndexModel<MessageBodyDocument>>
+ {
+ // TTL index for automatic document expiration
+ new(
+ Builders<MessageBodyDocument>.IndexKeys.Ascending(x => x.ExpiresAt),
+ new CreateIndexOptions { Name = "ttl_expiry", ExpireAfter = TimeSpan.Zero })
+ };
+
+ if (enableFullTextSearch)
+ {
+ // Text index on body content for full-text search (async, eventual consistency)
+ indexes.Add(new(
+ Builders<MessageBodyDocument>.IndexKeys.Text(x => x.TextBody),
+ new CreateIndexOptions { Name = "body_text_search" }));
+ }
+
+ return [.. indexes];
+ }
+
+ public static CreateIndexModel<FailedAuditImportDocument>[] FailedAuditImports =>
+ [
+ // No additional indexes needed - queries are by _id only
+ ];
+ }
+}
diff --git a/src/ServiceControl.Audit.Persistence.MongoDB/Indexes/IndexInitializer.cs b/src/ServiceControl.Audit.Persistence.MongoDB/Indexes/IndexInitializer.cs
new file mode 100644
index 0000000000..5fc6617bc7
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.MongoDB/Indexes/IndexInitializer.cs
@@ -0,0 +1,62 @@
+namespace ServiceControl.Audit.Persistence.MongoDB.Indexes
+{
+ using System.Threading;
+ using System.Threading.Tasks;
+ using Collections;
+ using Documents;
+ using global::MongoDB.Driver;
+ using Microsoft.Extensions.Logging;
+
+ class IndexInitializer(IMongoClientProvider clientProvider, MongoSettings settings, ILogger<IndexInitializer> logger)
+ {
+ public async Task CreateIndexes(CancellationToken cancellationToken = default)
+ {
+ var database = clientProvider.Database;
+
+ await CreateCollectionIndexes(
+ database.GetCollection<ProcessedMessageDocument>(CollectionNames.ProcessedMessages),
+ IndexDefinitions.GetProcessedMessageIndexes(),
+ cancellationToken).ConfigureAwait(false);
+
+ await CreateCollectionIndexes(
+ database.GetCollection<SagaSnapshotDocument>(CollectionNames.SagaSnapshots),
+ IndexDefinitions.SagaSnapshots,
+ cancellationToken).ConfigureAwait(false);
+
+ await CreateCollectionIndexes(
+ database.GetCollection<KnownEndpointDocument>(CollectionNames.KnownEndpoints),
+ IndexDefinitions.KnownEndpoints,
+ cancellationToken).ConfigureAwait(false);
+
+ if (settings.BodyStorageType == BodyStorageType.Database)
+ {
+ await CreateCollectionIndexes(
+ database.GetCollection<MessageBodyDocument>(CollectionNames.MessageBodies),
+ IndexDefinitions.GetMessageBodyIndexes(settings.EnableFullTextSearchOnBodies),
+ cancellationToken).ConfigureAwait(false);
+ }
+
+ // FailedAuditImports has no additional indexes - queries are by _id only
+ }
+
+ async Task CreateCollectionIndexes<T>(
+ IMongoCollection<T> collection,
+ CreateIndexModel<T>[] indexes,
+ CancellationToken cancellationToken)
+ {
+ if (indexes.Length == 0)
+ {
+ return;
+ }
+
+ logger.LogInformation(
+ "Ensuring {IndexCount} indexes on collection '{CollectionName}'",
+ indexes.Length,
+ collection.CollectionNamespace.CollectionName);
+
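+ // CreateManyAsync is safe to run on every startup: creating an index that already
+ // exists with the same name and definition is a no-op on the server.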
+ _ = await collection.Indexes
+ .CreateManyAsync(indexes, cancellationToken)
+ .ConfigureAwait(false);
+ }
+ }
+}
diff --git a/src/ServiceControl.Audit.Persistence.MongoDB/MinimumRequiredStorageState.cs b/src/ServiceControl.Audit.Persistence.MongoDB/MinimumRequiredStorageState.cs
new file mode 100644
index 0000000000..8472391c51
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.MongoDB/MinimumRequiredStorageState.cs
@@ -0,0 +1,7 @@
+namespace ServiceControl.Audit.Persistence.MongoDB
+{
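+ /// <summary>
+ /// Shared flag written by the storage pressure custom check and read by audit
+ /// ingestion to decide whether intake should pause while the database recovers.
+ /// </summary>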
+ public class MinimumRequiredStorageState
+ {
+ public bool CanIngestMore { get; set; } = true;
+ }
+}
diff --git a/src/ServiceControl.Audit.Persistence.MongoDB/MongoAuditDataStore.cs b/src/ServiceControl.Audit.Persistence.MongoDB/MongoAuditDataStore.cs
new file mode 100644
index 0000000000..20156d1436
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.MongoDB/MongoAuditDataStore.cs
@@ -0,0 +1,605 @@
+namespace ServiceControl.Audit.Persistence.MongoDB
+{
+ using System;
+ using System.Collections.Generic;
+ using System.Linq;
+ using System.Threading;
+ using System.Threading.Tasks;
+ using Auditing.BodyStorage;
+ using Collections;
+ using Documents;
+ using global::MongoDB.Bson;
+ using global::MongoDB.Driver;
+ using NServiceBus;
+ using ServiceControl.Audit.Auditing;
+ using ServiceControl.Audit.Auditing.MessagesView;
+ using ServiceControl.Audit.Infrastructure;
+ using ServiceControl.Audit.Monitoring;
+ using ServiceControl.Audit.Persistence.Infrastructure;
+ using ServiceControl.SagaAudit;
+
+ class MongoAuditDataStore(IMongoClientProvider clientProvider, IBodyStorage bodyStorage, MongoSettings settings) : IAuditDataStore
+ {
+ public async Task<QueryResult<IList<MessagesView>>> GetMessages(
+ bool includeSystemMessages,
+ PagingInfo pagingInfo,
+ SortInfo sortInfo,
+ DateTimeRange timeSentRange,
+ CancellationToken cancellationToken)
+ {
+ var collection = GetProcessedMessagesCollection();
+ var filter = BuildMessageFilter(includeSystemMessages, timeSentRange);
+ var sort = BuildSort(sortInfo);
+
+ var totalCount = await collection.CountDocumentsAsync(filter, cancellationToken: cancellationToken).ConfigureAwait(false);
+
+ var documents = await collection
+ .Find(filter)
+ .Sort(sort)
+ .Skip(pagingInfo.Offset)
+ .Limit(pagingInfo.PageSize)
+ .ToListAsync(cancellationToken)
+ .ConfigureAwait(false);
+
+ var results = documents.Select(ToMessagesView).ToList();
+
+ return new QueryResult<IList<MessagesView>>(results, new QueryStatsInfo(string.Empty, (int)totalCount));
+ }
+
+ public async Task<QueryResult<IList<MessagesView>>> QueryMessagesByReceivingEndpoint(
+ bool includeSystemMessages,
+ string endpointName,
+ PagingInfo pagingInfo,
+ SortInfo sortInfo,
+ DateTimeRange timeSentRange,
+ CancellationToken cancellationToken)
+ {
+ var collection = GetProcessedMessagesCollection();
+ var endpointFilter = Builders.Filter.Eq("messageMetadata.ReceivingEndpoint.Name", endpointName);
+ var baseFilter = BuildMessageFilter(includeSystemMessages, timeSentRange);
+ var filter = Builders.Filter.And(endpointFilter, baseFilter);
+ var sort = BuildSort(sortInfo);
+
+ var totalCount = await collection.CountDocumentsAsync(filter, cancellationToken: cancellationToken).ConfigureAwait(false);
+
+ var documents = await collection
+ .Find(filter)
+ .Sort(sort)
+ .Skip(pagingInfo.Offset)
+ .Limit(pagingInfo.PageSize)
+ .ToListAsync(cancellationToken)
+ .ConfigureAwait(false);
+
+ var results = documents.Select(ToMessagesView).ToList();
+
+ return new QueryResult<IList<MessagesView>>(results, new QueryStatsInfo(string.Empty, (int)totalCount));
+ }
+
+ public async Task<QueryResult<IList<MessagesView>>> QueryMessagesByConversationId(
+ string conversationId,
+ PagingInfo pagingInfo,
+ SortInfo sortInfo,
+ CancellationToken cancellationToken)
+ {
+ var collection = GetProcessedMessagesCollection();
+ var filter = Builders.Filter.Eq("messageMetadata.ConversationId", conversationId);
+ var sort = BuildSort(sortInfo);
+
+ var totalCount = await collection.CountDocumentsAsync(filter, cancellationToken: cancellationToken).ConfigureAwait(false);
+
+ var documents = await collection
+ .Find(filter)
+ .Sort(sort)
+ .Skip(pagingInfo.Offset)
+ .Limit(pagingInfo.PageSize)
+ .ToListAsync(cancellationToken)
+ .ConfigureAwait(false);
+
+ var results = documents.Select(ToMessagesView).ToList();
+
+ return new QueryResult<IList<MessagesView>>(results, new QueryStatsInfo(string.Empty, (int)totalCount));
+ }
+
+ public async Task<QueryResult<IList<MessagesView>>> QueryMessages(
+ string searchParam,
+ PagingInfo pagingInfo,
+ SortInfo sortInfo,
+ DateTimeRange timeSentRange,
+ CancellationToken cancellationToken)
+ {
+ var collection = GetProcessedMessagesCollection();
+ var timeRangeFilter = BuildTimeSentRangeFilter(timeSentRange);
+
+ if (!settings.EnableFullTextSearchOnBodies)
+ {
+ // Metadata-only search on processedMessages
+ var textFilter = Builders<ProcessedMessageDocument>.Filter.Text(searchParam);
+ var filter = Builders<ProcessedMessageDocument>.Filter.And(textFilter, timeRangeFilter);
+ return await ExecuteQuery(collection, filter, sortInfo, pagingInfo, cancellationToken).ConfigureAwait(false);
+ }
+
+ // Two-phase search: metadata + body
+ var combinedFilter = await BuildTwoPhaseSearchFilter(searchParam, timeRangeFilter, pagingInfo, cancellationToken).ConfigureAwait(false);
+ if (combinedFilter == null)
+ {
+ return new QueryResult<IList<MessagesView>>([], new QueryStatsInfo(string.Empty, 0));
+ }
+
+ return await ExecuteQuery(collection, combinedFilter, sortInfo, pagingInfo, cancellationToken).ConfigureAwait(false);
+ }
+
+ public async Task<QueryResult<IList<MessagesView>>> QueryMessagesByReceivingEndpointAndKeyword(
+ string endpoint,
+ string keyword,
+ PagingInfo pagingInfo,
+ SortInfo sortInfo,
+ DateTimeRange timeSentRange,
+ CancellationToken cancellationToken)
+ {
+ var collection = GetProcessedMessagesCollection();
+ var endpointFilter = Builders.Filter.Eq("messageMetadata.ReceivingEndpoint.Name", endpoint);
+ var timeRangeFilter = BuildTimeSentRangeFilter(timeSentRange);
+
+ if (!settings.EnableFullTextSearchOnBodies)
+ {
+ // Metadata-only search on processedMessages
+ var textFilter = Builders<ProcessedMessageDocument>.Filter.Text(keyword);
+ var filter = Builders<ProcessedMessageDocument>.Filter.And(textFilter, endpointFilter, timeRangeFilter);
+ return await ExecuteQuery(collection, filter, sortInfo, pagingInfo, cancellationToken).ConfigureAwait(false);
+ }
+
+ // Two-phase search: metadata + body
+ var baseFilter = Builders<ProcessedMessageDocument>.Filter.And(endpointFilter, timeRangeFilter);
+ var combinedFilter = await BuildTwoPhaseSearchFilter(keyword, baseFilter, pagingInfo, cancellationToken).ConfigureAwait(false);
+ if (combinedFilter == null)
+ {
+ return new QueryResult<IList<MessagesView>>([], new QueryStatsInfo(string.Empty, 0));
+ }
+
+ return await ExecuteQuery(collection, combinedFilter, sortInfo, pagingInfo, cancellationToken).ConfigureAwait(false);
+ }
+
+ public async Task<QueryResult<IList<KnownEndpointsView>>> QueryKnownEndpoints(CancellationToken cancellationToken)
+ {
+ var collection = clientProvider.Database.GetCollection<KnownEndpointDocument>(CollectionNames.KnownEndpoints);
+
+ var documents = await collection
+ .Find(FilterDefinition<KnownEndpointDocument>.Empty)
+ .Limit(1024)
+ .ToListAsync(cancellationToken)
+ .ConfigureAwait(false);
+
+ var results = documents.Select(doc => new KnownEndpointsView
+ {
+ Id = DeterministicGuid.MakeId(doc.Name, doc.HostId.ToString()),
+ EndpointDetails = new EndpointDetails
+ {
+ Host = doc.Host,
+ HostId = doc.HostId,
+ Name = doc.Name
+ },
+ HostDisplayName = doc.Host
+ }).ToList();
+
+ return new QueryResult<IList<KnownEndpointsView>>(results, new QueryStatsInfo(string.Empty, results.Count));
+ }
+
+ public async Task<QueryResult<SagaHistory>> QuerySagaHistoryById(Guid sagaId, CancellationToken cancellationToken)
+ {
+ var collection = clientProvider.Database.GetCollection<SagaSnapshotDocument>(CollectionNames.SagaSnapshots);
+
+ var snapshots = await collection
+ .Find(doc => doc.SagaId == sagaId)
+ .SortBy(doc => doc.StartTime)
+ .ToListAsync(cancellationToken)
+ .ConfigureAwait(false);
+
+ if (snapshots.Count == 0)
+ {
+ return QueryResult<SagaHistory>.Empty();
+ }
+
+ var sagaHistory = new SagaHistory
+ {
+ Id = sagaId,
+ SagaId = sagaId,
+ SagaType = snapshots[0].SagaType,
+ Changes = [.. snapshots.Select(snapshot => new SagaStateChange
+ {
+ StartTime = snapshot.StartTime,
+ FinishTime = snapshot.FinishTime,
+ Status = snapshot.Status,
+ StateAfterChange = snapshot.StateAfterChange,
+ Endpoint = snapshot.Endpoint,
+ InitiatingMessage = snapshot.InitiatingMessage != null
+ ? new InitiatingMessage
+ {
+ MessageId = snapshot.InitiatingMessage.MessageId,
+ MessageType = snapshot.InitiatingMessage.MessageType,
+ IsSagaTimeoutMessage = snapshot.InitiatingMessage.IsSagaTimeoutMessage,
+ OriginatingMachine = snapshot.InitiatingMessage.OriginatingMachine,
+ OriginatingEndpoint = snapshot.InitiatingMessage.OriginatingEndpoint,
+ TimeSent = snapshot.InitiatingMessage.TimeSent,
+ Intent = snapshot.InitiatingMessage.Intent
+ }
+ : null,
+ OutgoingMessages = snapshot.OutgoingMessages?.Select(msg => new ResultingMessage
+ {
+ MessageId = msg.MessageId,
+ MessageType = msg.MessageType,
+ Destination = msg.Destination,
+ TimeSent = msg.TimeSent,
+ Intent = msg.Intent,
+ DeliveryDelay = !string.IsNullOrEmpty(msg.DeliveryDelay) ? TimeSpan.Parse(msg.DeliveryDelay) : null,
+ DeliverAt = msg.DeliverAt
+ }).ToList() ?? []
+ })]
+ };
+
+ return new QueryResult<SagaHistory>(sagaHistory, new QueryStatsInfo(string.Empty, 1));
+ }
+
+ public async Task<QueryResult<IList<AuditCount>>> QueryAuditCounts(string endpointName, CancellationToken cancellationToken)
+ {
+ var collection = GetProcessedMessagesCollection();
+ var results = new List<AuditCount>();
+
+ // Find oldest message for this endpoint
+ var endpointFilter = Builders<ProcessedMessageDocument>.Filter.Eq("messageMetadata.ReceivingEndpoint.Name", endpointName);
+
+ var oldestMsg = await collection
+ .Find(endpointFilter)
+ .Sort(Builders<ProcessedMessageDocument>.Sort.Ascending(x => x.ProcessedAt))
+ .Limit(1)
+ .FirstOrDefaultAsync(cancellationToken)
+ .ConfigureAwait(false);
+
+ if (oldestMsg == null)
+ {
+ return new QueryResult<IList<AuditCount>>(results, QueryStatsInfo.Zero);
+ }
+
+ var endDate = DateTime.UtcNow.Date.AddDays(1);
+ var oldestMsgDate = oldestMsg.ProcessedAt.ToUniversalTime().Date;
+ var thirtyDaysAgo = endDate.AddDays(-30);
+
+ var startDate = oldestMsgDate > thirtyDaysAgo ? oldestMsgDate : thirtyDaysAgo;
+
+ // Query each day - similar to RavenDB implementation
+ for (var date = startDate; date < endDate; date = date.AddDays(1))
+ {
+ var nextDate = date.AddDays(1);
+
+ var dayFilter = Builders<ProcessedMessageDocument>.Filter.And(
+ Builders<ProcessedMessageDocument>.Filter.Eq("messageMetadata.ReceivingEndpoint.Name", endpointName),
+ Builders<ProcessedMessageDocument>.Filter.Eq("messageMetadata.IsSystemMessage", false),
+ Builders<ProcessedMessageDocument>.Filter.Gte(x => x.ProcessedAt, date),
+ Builders<ProcessedMessageDocument>.Filter.Lt(x => x.ProcessedAt, nextDate)
+ );
+
+ var count = await collection.CountDocumentsAsync(dayFilter, cancellationToken: cancellationToken).ConfigureAwait(false);
+
+ if (count > 0)
+ {
+ results.Add(new AuditCount { UtcDate = date, Count = count });
+ }
+ }
+
+ return new QueryResult<IList<AuditCount>>(results, QueryStatsInfo.Zero);
+ }
+
+ public async Task<MessageBodyView> GetMessageBody(string messageId, CancellationToken cancellationToken)
+ {
+ var result = await bodyStorage.TryFetch(messageId, cancellationToken).ConfigureAwait(false);
+
+ if (result.HasResult)
+ {
+ return MessageBodyView.FromStream(
+ result.Stream,
+ result.ContentType,
+ result.BodySize,
+ result.Etag
+ );
+ }
+
+ return MessageBodyView.NotFound();
+ }
+
+ async Task<FilterDefinition<ProcessedMessageDocument>> BuildTwoPhaseSearchFilter(
+ string searchParam,
+ FilterDefinition<ProcessedMessageDocument> additionalFilter,
+ PagingInfo pagingInfo,
+ CancellationToken cancellationToken)
+ {
+ var collection = GetProcessedMessagesCollection();
+ var idLimit = pagingInfo.Offset + pagingInfo.PageSize + 500;
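+ // e.g. offset 0 with page size 50 collects up to 550 candidate ids per phase; the
+ // +500 headroom allows for candidates that the re-applied filters drop before paging.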
+
+ // Phase 1: Metadata text search on processedMessages
+ var metadataTextFilter = Builders<ProcessedMessageDocument>.Filter.Text(searchParam);
+ var metadataFilter = Builders<ProcessedMessageDocument>.Filter.And(metadataTextFilter, additionalFilter);
+ var metadataIdsTask = collection
+ .Find(metadataFilter)
+ .Project(d => d.Id)
+ .Limit(idLimit)
+ .ToListAsync(cancellationToken);
+
+ // Phase 2: Body text search on messageBodies (in parallel)
+ var bodySearchCollection = clientProvider.Database
+ .GetCollection<MessageBodyDocument>(CollectionNames.MessageBodies);
+ var bodyTextFilter = Builders<MessageBodyDocument>.Filter.Text(searchParam);
+ var bodyIdsTask = bodySearchCollection
+ .Find(bodyTextFilter)
+ .Project(d => d.Id)
+ .Limit(idLimit)
+ .ToListAsync(cancellationToken);
+
+ _ = await Task.WhenAll(metadataIdsTask, bodyIdsTask).ConfigureAwait(false);
+
+ var allIds = new HashSet<string>(metadataIdsTask.Result);
+ allIds.UnionWith(bodyIdsTask.Result);
+
+ if (allIds.Count == 0)
+ {
+ return null;
+ }
+
+ // Fetch full documents by merged IDs with additional filters
+ var idFilter = Builders<ProcessedMessageDocument>.Filter.In(d => d.Id, allIds);
+ return Builders<ProcessedMessageDocument>.Filter.And(idFilter, additionalFilter);
+ }
+
+ static async Task<QueryResult<IList<MessagesView>>> ExecuteQuery(
+ IMongoCollection<ProcessedMessageDocument> collection,
+ FilterDefinition<ProcessedMessageDocument> filter,
+ SortInfo sortInfo,
+ PagingInfo pagingInfo,
+ CancellationToken cancellationToken)
+ {
+ var sort = BuildSort(sortInfo);
+
+ var totalCount = await collection.CountDocumentsAsync(filter, cancellationToken: cancellationToken).ConfigureAwait(false);
+
+ var documents = await collection
+ .Find(filter)
+ .Sort(sort)
+ .Skip(pagingInfo.Offset)
+ .Limit(pagingInfo.PageSize)
+ .ToListAsync(cancellationToken)
+ .ConfigureAwait(false);
+
+ var results = documents.Select(ToMessagesView).ToList();
+
+ return new QueryResult<IList<MessagesView>>(results, new QueryStatsInfo(string.Empty, (int)totalCount));
+ }
+
+ IMongoCollection<ProcessedMessageDocument> GetProcessedMessagesCollection()
+ => clientProvider.Database.GetCollection<ProcessedMessageDocument>(CollectionNames.ProcessedMessages);
+
+ static FilterDefinition<ProcessedMessageDocument> BuildMessageFilter(bool includeSystemMessages, DateTimeRange timeSentRange)
+ {
+ var filters = new List<FilterDefinition<ProcessedMessageDocument>>();
+
+ if (!includeSystemMessages)
+ {
+ filters.Add(Builders.Filter.Ne("messageMetadata.IsSystemMessage", true));
+ }
+
+ if (timeSentRange?.From != null)
+ {
+ filters.Add(Builders.Filter.Gte("messageMetadata.TimeSent", timeSentRange.From.Value));
+ }
+
+ if (timeSentRange?.To != null)
+ {
+ filters.Add(Builders.Filter.Lte("messageMetadata.TimeSent", timeSentRange.To.Value));
+ }
+
+ return filters.Count > 0
+ ? Builders<ProcessedMessageDocument>.Filter.And(filters)
+ : FilterDefinition<ProcessedMessageDocument>.Empty;
+ }
+
+ static FilterDefinition<ProcessedMessageDocument> BuildTimeSentRangeFilter(DateTimeRange timeSentRange)
+ {
+ var filters = new List<FilterDefinition<ProcessedMessageDocument>>();
+
+ if (timeSentRange?.From != null)
+ {
+ filters.Add(Builders.Filter.Gte("messageMetadata.TimeSent", timeSentRange.From.Value));
+ }
+
+ if (timeSentRange?.To != null)
+ {
+ filters.Add(Builders.Filter.Lte("messageMetadata.TimeSent", timeSentRange.To.Value));
+ }
+
+ return filters.Count > 0
+ ? Builders<ProcessedMessageDocument>.Filter.And(filters)
+ : FilterDefinition<ProcessedMessageDocument>.Empty;
+ }
+
+ static SortDefinition<ProcessedMessageDocument> BuildSort(SortInfo sortInfo)
+ {
+ var sortField = sortInfo?.Sort?.ToLowerInvariant() switch
+ {
+ "time_sent" => "messageMetadata.TimeSent",
+ _ => "processedAt"
+ };
+
+ var isDescending = sortInfo?.Direction?.ToLowerInvariant() != "asc";
+
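+ // e.g. Sort = "time_sent" with Direction = "asc" sorts ascending on
+ // messageMetadata.TimeSent; unrecognized sort fields fall back to processedAt,
+ // and any direction other than "asc" is treated as descending.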
+ return isDescending
+ ? Builders<ProcessedMessageDocument>.Sort.Descending(sortField)
+ : Builders<ProcessedMessageDocument>.Sort.Ascending(sortField);
+ }
+
+ static MessagesView ToMessagesView(ProcessedMessageDocument doc)
+ {
+ var metadata = doc.MessageMetadata;
+
+ return new MessagesView
+ {
+ Id = doc.Id,
+ MessageId = GetMetadataString(metadata, "MessageId"),
+ MessageType = GetMetadataString(metadata, "MessageType"),
+ TimeSent = GetMetadataDateTime(metadata, "TimeSent"),
+ ProcessedAt = doc.ProcessedAt,
+ CriticalTime = GetMetadataTimeSpan(metadata, "CriticalTime"),
+ ProcessingTime = GetMetadataTimeSpan(metadata, "ProcessingTime"),
+ DeliveryTime = GetMetadataTimeSpan(metadata, "DeliveryTime"),
+ IsSystemMessage = GetMetadataBool(metadata, "IsSystemMessage"),
+ ConversationId = GetMetadataString(metadata, "ConversationId"),
+ ReceivingEndpoint = GetMetadataEndpoint(metadata, "ReceivingEndpoint"),
+ SendingEndpoint = GetMetadataEndpoint(metadata, "SendingEndpoint"),
+ Headers = doc.Headers?.Select(h => new KeyValuePair<string, string>(h.Key, h.Value)),
+ BodyUrl = GetMetadataString(metadata, "BodyUrl"),
+ BodySize = GetMetadataInt(metadata, "ContentLength"),
+ Status = MessageStatus.Successful,
+ MessageIntent = GetMetadataMessageIntent(metadata, "MessageIntent"),
+ InvokedSagas = GetMetadataSagaInfoList(metadata, "InvokedSagas"),
+ OriginatesFromSaga = GetMetadataSagaInfo(metadata, "OriginatesFromSaga")
+ };
+ }
+
+ static string GetMetadataString(BsonDocument metadata, string key)
+ {
+ if (metadata == null || !metadata.TryGetValue(key, out var value) || value.IsBsonNull)
+ {
+ return null;
+ }
+
+ return value.AsString;
+ }
+
+ static DateTime? GetMetadataDateTime(BsonDocument metadata, string key)
+ {
+ if (metadata == null || !metadata.TryGetValue(key, out var value) || value.IsBsonNull)
+ {
+ return null;
+ }
+
+ return value.ToUniversalTime();
+ }
+
+ static bool GetMetadataBool(BsonDocument metadata, string key)
+ {
+ if (metadata == null || !metadata.TryGetValue(key, out var value) || value.IsBsonNull)
+ {
+ return false;
+ }
+
+ return value.AsBoolean;
+ }
+
+ static int GetMetadataInt(BsonDocument metadata, string key)
+ {
+ if (metadata == null || !metadata.TryGetValue(key, out var value) || value.IsBsonNull)
+ {
+ return 0;
+ }
+
+ return value.ToInt32();
+ }
+
+ static TimeSpan GetMetadataTimeSpan(BsonDocument metadata, string key)
+ {
+ if (metadata == null || !metadata.TryGetValue(key, out var value) || value.IsBsonNull)
+ {
+ return TimeSpan.Zero;
+ }
+
+ if (value.IsString && TimeSpan.TryParse(value.AsString, out var result))
+ {
+ return result;
+ }
+
+ return TimeSpan.Zero;
+ }
+
+ static EndpointDetails GetMetadataEndpoint(BsonDocument metadata, string key)
+ {
+ if (metadata == null || !metadata.TryGetValue(key, out var value) || value.IsBsonNull || !value.IsBsonDocument)
+ {
+ return null;
+ }
+
+ var endpointDoc = value.AsBsonDocument;
+
+ return new EndpointDetails
+ {
+ Name = endpointDoc.TryGetValue("Name", out var name) && !name.IsBsonNull ? name.AsString : null,
+ Host = endpointDoc.TryGetValue("Host", out var host) && !host.IsBsonNull ? host.AsString : null,
+ HostId = endpointDoc.TryGetValue("HostId", out var hostId) && !hostId.IsBsonNull ? BsonValueToGuid(hostId) : Guid.Empty
+ };
+ }
+
+ static MessageIntent GetMetadataMessageIntent(BsonDocument metadata, string key)
+ {
+ if (metadata == null || !metadata.TryGetValue(key, out var value) || value.IsBsonNull)
+ {
+ return MessageIntent.Send;
+ }
+
+ if (value.IsString && Enum.TryParse<MessageIntent>(value.AsString, out var result))
+ {
+ return result;
+ }
+
+ if (value.IsInt32)
+ {
+ return (MessageIntent)value.AsInt32;
+ }
+
+ return MessageIntent.Send;
+ }
+
+ static List<SagaInfo> GetMetadataSagaInfoList(BsonDocument metadata, string key)
+ {
+ if (metadata == null || !metadata.TryGetValue(key, out var value) || value.IsBsonNull || !value.IsBsonArray)
+ {
+ return null;
+ }
+
+ return value.AsBsonArray
+ .Where(v => v.IsBsonDocument)
+ .Select(v => MapSagaInfo(v.AsBsonDocument))
+ .Where(s => s != null)
+ .ToList();
+ }
+
+ static SagaInfo GetMetadataSagaInfo(BsonDocument metadata, string key)
+ {
+ if (metadata == null || !metadata.TryGetValue(key, out var value) || value.IsBsonNull || !value.IsBsonDocument)
+ {
+ return null;
+ }
+
+ return MapSagaInfo(value.AsBsonDocument);
+ }
+
+ static SagaInfo MapSagaInfo(BsonDocument doc)
+ {
+ if (doc == null)
+ {
+ return null;
+ }
+
+ return new SagaInfo
+ {
+ SagaId = doc.TryGetValue("SagaId", out var sagaId) && !sagaId.IsBsonNull ? BsonValueToGuid(sagaId) : Guid.Empty,
+ SagaType = doc.TryGetValue("SagaType", out var sagaType) && !sagaType.IsBsonNull ? sagaType.AsString : null,
+ ChangeStatus = doc.TryGetValue("ChangeStatus", out var changeStatus) && !changeStatus.IsBsonNull ? changeStatus.AsString : null
+ };
+ }
+
+ // Handles Guids stored as either strings (from metadata) or BsonBinaryData (from document properties)
+ static Guid BsonValueToGuid(BsonValue value)
+ {
+ if (value.IsString)
+ {
+ return Guid.Parse(value.AsString);
+ }
+ return value.AsGuid;
+ }
+ }
+}
diff --git a/src/ServiceControl.Audit.Persistence.MongoDB/MongoClientProvider.cs b/src/ServiceControl.Audit.Persistence.MongoDB/MongoClientProvider.cs
new file mode 100644
index 0000000000..e1bc3e6aa5
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.MongoDB/MongoClientProvider.cs
@@ -0,0 +1,92 @@
+#nullable enable
+
+namespace ServiceControl.Audit.Persistence.MongoDB
+{
+ using System;
+ using System.Threading;
+ using System.Threading.Tasks;
+ using global::MongoDB.Driver;
+ using ProductCapabilities;
+
+ /// <summary>
+ /// Provides access to the MongoDB client and database.
+ /// Manages the client lifecycle and product capability detection.
+ /// </summary>
+ class MongoClientProvider(MongoSettings settings) : IMongoClientProvider, IAsyncDisposable
+ {
+ readonly MongoSettings settings = settings;
+ IMongoClient? client;
+ IMongoDatabase? database;
+ IMongoProductCapabilities? productCapabilities;
+ bool initialized;
+
+ public IMongoClient Client
+ {
+ get
+ {
+ EnsureInitialized();
+ return client!;
+ }
+ }
+
+ public IMongoDatabase Database
+ {
+ get
+ {
+ EnsureInitialized();
+ return database!;
+ }
+ }
+
+ public IMongoProductCapabilities ProductCapabilities
+ {
+ get
+ {
+ EnsureInitialized();
+ return productCapabilities!;
+ }
+ }
+
+ public async Task InitializeAsync(CancellationToken cancellationToken = default)
+ {
+ if (initialized)
+ {
+ return;
+ }
+
+ var mongoUrl = MongoUrl.Create(settings.ConnectionString);
+ var clientSettings = MongoClientSettings.FromUrl(mongoUrl);
+
+ // Configure client settings
+ clientSettings.ApplicationName = "ServiceControl.Audit";
+
+ client = new MongoClient(clientSettings);
+ database = client.GetDatabase(settings.DatabaseName);
+
+ // Detect product capabilities
+ productCapabilities = await MongoProductDetector.DetectAsync(client, settings.ConnectionString, cancellationToken).ConfigureAwait(false);
+
+ initialized = true;
+ }
+
+ public ValueTask DisposeAsync()
+ {
+ // MongoClient doesn't need explicit disposal in MongoDB.Driver 3.x
+ // but we implement IAsyncDisposable for future-proofing
+ client = null;
+ database = null;
+ productCapabilities = null;
+ initialized = false;
+
+ return ValueTask.CompletedTask;
+ }
+
+ void EnsureInitialized()
+ {
+ if (!initialized)
+ {
+ throw new InvalidOperationException("MongoClientProvider has not been initialized. Call InitializeAsync first.");
+ }
+ }
+ }
+}
diff --git a/src/ServiceControl.Audit.Persistence.MongoDB/MongoFailedAuditStorage.cs b/src/ServiceControl.Audit.Persistence.MongoDB/MongoFailedAuditStorage.cs
new file mode 100644
index 0000000000..8a97e006f0
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.MongoDB/MongoFailedAuditStorage.cs
@@ -0,0 +1,85 @@
+namespace ServiceControl.Audit.Persistence.MongoDB
+{
+ using System;
+ using System.Collections.Generic;
+ using System.Threading;
+ using System.Threading.Tasks;
+ using Auditing;
+ using Collections;
+ using Documents;
+ using global::MongoDB.Bson;
+ using global::MongoDB.Driver;
+
+ class MongoFailedAuditStorage(IMongoClientProvider clientProvider) : IFailedAuditStorage
+ {
+ IMongoCollection<FailedAuditImportDocument> GetCollection() =>
+ clientProvider.Database.GetCollection<FailedAuditImportDocument>(CollectionNames.FailedAuditImports);
+
+ public async Task SaveFailedAuditImport(FailedAuditImport message)
+ {
+ var document = new FailedAuditImportDocument
+ {
+ Id = ObjectId.GenerateNewId(),
+ MessageId = message.Message.Id,
+ Headers = message.Message.Headers,
+ Body = message.Message.Body,
+ ExceptionInfo = message.ExceptionInfo
+ };
+
+ await GetCollection().InsertOneAsync(document).ConfigureAwait(false);
+ }
+
+ public async Task ProcessFailedMessages(
+ Func<FailedTransportMessage, Func<CancellationToken, Task>, CancellationToken, Task> onMessage,
+ CancellationToken cancellationToken)
+ {
+ var collection = GetCollection();
+ var documentsToDelete = new List<ObjectId>();
+
+ using var cursor = await collection.FindAsync(
+ FilterDefinition<FailedAuditImportDocument>.Empty,
+ cancellationToken: cancellationToken).ConfigureAwait(false);
+
+ while (await cursor.MoveNextAsync(cancellationToken).ConfigureAwait(false))
+ {
+ foreach (var document in cursor.Current)
+ {
+ if (cancellationToken.IsCancellationRequested)
+ {
+ break;
+ }
+
+ var transportMessage = new FailedTransportMessage
+ {
+ Id = document.MessageId,
+ Headers = document.Headers,
+ Body = document.Body
+ };
+
+ var documentId = document.Id;
+
+ await onMessage(
+ transportMessage,
+ _ =>
+ {
+ documentsToDelete.Add(documentId);
+ return Task.CompletedTask;
+ },
+ cancellationToken).ConfigureAwait(false);
+ }
+ }
+
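+ // Deletes are deferred to a single batch here, so a crash mid-run leaves the
+ // remaining documents in place to be re-processed on the next run rather than lost.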
+ if (documentsToDelete.Count > 0)
+ {
+ var filter = Builders<FailedAuditImportDocument>.Filter.In(d => d.Id, documentsToDelete);
+ _ = await collection.DeleteManyAsync(filter, cancellationToken).ConfigureAwait(false);
+ }
+ }
+
+ public async Task<int> GetFailedAuditsCount()
+ {
+ var count = await GetCollection().CountDocumentsAsync(FilterDefinition<FailedAuditImportDocument>.Empty).ConfigureAwait(false);
+ return (int)count;
+ }
+ }
+}
diff --git a/src/ServiceControl.Audit.Persistence.MongoDB/MongoPersistence.cs b/src/ServiceControl.Audit.Persistence.MongoDB/MongoPersistence.cs
new file mode 100644
index 0000000000..7188636836
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.MongoDB/MongoPersistence.cs
@@ -0,0 +1,99 @@
+namespace ServiceControl.Audit.Persistence.MongoDB
+{
+ using System;
+ using System.Threading.Channels;
+ using Auditing.BodyStorage;
+ using BodyStorage;
+ using Indexes;
+ using Microsoft.Extensions.DependencyInjection;
+ using Persistence.UnitOfWork;
+ using Search;
+ using UnitOfWork;
+
+ class MongoPersistence(MongoSettings settings) : IPersistence
+ {
+ public void AddPersistence(IServiceCollection services)
+ {
+ ConfigureLifecycle(services, settings);
+
+ // Register product capabilities - will be populated during initialization
+ services.AddSingleton(sp =>
+ sp.GetRequiredService<IMongoClientProvider>().ProductCapabilities);
+
+ // Async body storage - decouples body writes (and optional FTS text index) from main write path
+ if (settings.BodyStorageType == BodyStorageType.Database)
+ {
+ // (the channel element type below is an assumption — the BodyStorageWriter payload type is not shown in this diff)
+ var channel = Channel.CreateBounded<MessageBodyDocument>(new BoundedChannelOptions(10_000)
+ {
+ SingleReader = true,
+ SingleWriter = false,
+ FullMode = BoundedChannelFullMode.Wait
+ });
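+ // With FullMode.Wait a full channel blocks producers instead of dropping bodies,
+ // so the 10,000-entry bound turns a stalled writer into ingestion backpressure
+ // rather than unbounded memory growth.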
+ services.AddSingleton(channel);
+ services.AddHostedService<BodyStorageWriter>();
+ }
+
+ // Unit of work for audit ingestion
+ services.AddSingleton<IAuditIngestionUnitOfWorkFactory>(sp =>
+ {
+ var channel = sp.GetService<Channel<MessageBodyDocument>>();
+ return new MongoAuditIngestionUnitOfWorkFactory(
+ sp.GetRequiredService(),
+ sp.GetRequiredService(),
+ sp.GetRequiredService(),
+ sp.GetRequiredService(),
+ channel);
+ });
+
+ // Failed audit storage
+ services.AddSingleton<IFailedAuditStorage, MongoFailedAuditStorage>();
+
+ // Body storage - register based on configuration
+ RegisterBodyStorage(services, settings);
+
+ // Audit data store for queries
+ services.AddSingleton<IAuditDataStore, MongoAuditDataStore>();
+
+ // Storage pressure monitoring - checked by CheckMongoDbCachePressure (auto-loaded by NServiceBus)
+ services.AddSingleton<MinimumRequiredStorageState>();
+ }
+
+ public void AddInstaller(IServiceCollection services) => ConfigureLifecycle(services, settings);
+
+ static void RegisterBodyStorage(IServiceCollection services, MongoSettings settings)
+ {
+ switch (settings.BodyStorageType)
+ {
+ case BodyStorageType.None:
+ services.AddSingleton<IBodyStorage, NullBodyStorage>();
+ break;
+
+ case BodyStorageType.Database:
+ services.AddSingleton<IBodyStorage, MongoBodyStorage>();
+ break;
+
+ case BodyStorageType.FileSystem:
+ services.AddSingleton<IBodyStorage, FileSystemBodyStorage>();
+ break;
+
+ default:
+ throw new ArgumentOutOfRangeException(nameof(settings.BodyStorageType), settings.BodyStorageType, "Unknown body storage type");
+ }
+ }
+
+ static void ConfigureLifecycle(IServiceCollection services, MongoSettings settings)
+ {
+ services.AddSingleton(settings);
+
+ services.AddSingleton<MongoClientProvider>();
+ services.AddSingleton<IMongoClientProvider>(sp => sp.GetRequiredService<MongoClientProvider>());
+
+ services.AddSingleton<IndexInitializer>();
+
+ services.AddSingleton<MongoPersistenceLifecycle>();
+ services.AddSingleton<IMongoPersistenceLifecycle>(sp => sp.GetRequiredService<MongoPersistenceLifecycle>());
+
+ services.AddHostedService<MongoPersistenceLifecycleHostedService>(); // hosted-service type name assumed; the wrapper class is not shown in this diff
+ }
+ }
+}
diff --git a/src/ServiceControl.Audit.Persistence.MongoDB/MongoPersistenceConfiguration.cs b/src/ServiceControl.Audit.Persistence.MongoDB/MongoPersistenceConfiguration.cs
new file mode 100644
index 0000000000..3ee637b7d4
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.MongoDB/MongoPersistenceConfiguration.cs
@@ -0,0 +1,110 @@
+namespace ServiceControl.Audit.Persistence.MongoDB
+{
+ using System;
+ using System.Collections.Generic;
+
+ public class MongoPersistenceConfiguration : IPersistenceConfiguration
+ {
+ public const string ConnectionStringKey = "Database/ConnectionString";
+ public const string BodyStorageTypeKey = "Database/BodyStorageType";
+ public const string BodyStoragePathKey = "Database/BodyStoragePath";
+ public const string BodyWriterBatchSizeKey = "Database/BodyWriterBatchSize";
+ public const string BodyWriterParallelWritersKey = "Database/BodyWriterParallelWriters";
+ public const string BodyWriterBatchTimeoutKey = "Database/BodyWriterBatchTimeout";
+
+ public IEnumerable<string> ConfigurationKeys =>
+ [
+ ConnectionStringKey,
+ BodyStorageTypeKey,
+ BodyStoragePathKey,
+ BodyWriterBatchSizeKey,
+ BodyWriterParallelWritersKey,
+ BodyWriterBatchTimeoutKey
+ ];
+
+ public string Name => "MongoDB";
+
+ public IPersistence Create(PersistenceSettings settings)
+ {
+ var mongoSettings = GetMongoSettings(settings);
+ return new MongoPersistence(mongoSettings);
+ }
+
+ internal static MongoSettings GetMongoSettings(PersistenceSettings settings)
+ {
+ if (!settings.PersisterSpecificSettings.TryGetValue(ConnectionStringKey, out var connectionString))
+ {
+ throw new InvalidOperationException($"{ConnectionStringKey} must be specified.");
+ }
+
+ if (string.IsNullOrWhiteSpace(connectionString))
+ {
+ throw new InvalidOperationException($"{ConnectionStringKey} cannot be empty.");
+ }
+
+ // Extract database name from connection string, default to "audit" if not specified
+ var mongoUrl = global::MongoDB.Driver.MongoUrl.Create(connectionString);
+ var databaseName = string.IsNullOrWhiteSpace(mongoUrl.DatabaseName) ? "audit" : mongoUrl.DatabaseName;
+
+ // Body storage type - defaults to Database
+ var bodyStorageType = BodyStorageType.Database;
+ if (settings.PersisterSpecificSettings.TryGetValue(BodyStorageTypeKey, out var bodyStorageTypeValue))
+ {
+ if (Enum.TryParse<BodyStorageType>(bodyStorageTypeValue, ignoreCase: true, out var parsed))
+ {
+ bodyStorageType = parsed;
+ }
+ }
+
+ // Body storage path - required for FileSystem storage
+ _ = settings.PersisterSpecificSettings.TryGetValue(BodyStoragePathKey, out var bodyStoragePath);
+
+ if (bodyStorageType == BodyStorageType.FileSystem && string.IsNullOrWhiteSpace(bodyStoragePath))
+ {
+ throw new InvalidOperationException($"{BodyStoragePathKey} must be specified when BodyStorageType is FileSystem.");
+ }
+
+ // Body writer settings - auto-calculate from TargetMessageIngestionRate if set, otherwise use defaults
+ var hasExplicitBatchSize = settings.PersisterSpecificSettings.TryGetValue(BodyWriterBatchSizeKey, out var batchSizeValue);
+ var hasExplicitWriters = settings.PersisterSpecificSettings.TryGetValue(BodyWriterParallelWritersKey, out var writersValue);
+
+ var bodyWriterBatchSize = 500;
+ var bodyWriterParallelWriters = 4;
+
+ if (settings.TargetMessageIngestionRate is { } rate)
+ {
+ bodyWriterBatchSize = rate > 2000 ? 500 : 200;
+ bodyWriterParallelWriters = rate <= 500 ? 2 : 4;
+ }
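+
+ // Worked example (illustrative): a TargetMessageIngestionRate of 1000 msg/s
+ // yields a batch size of 200 (not above 2000) and 4 writers (above 500),
+ // while a rate of 400 yields 200 and 2. Explicit settings below still win.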
+
+ if (hasExplicitBatchSize && int.TryParse(batchSizeValue, out var parsedBatchSize))
+ {
+ bodyWriterBatchSize = parsedBatchSize;
+ }
+
+ if (hasExplicitWriters && int.TryParse(writersValue, out var parsedWriters))
+ {
+ bodyWriterParallelWriters = parsedWriters;
+ }
+
+ TimeSpan? bodyWriterBatchTimeout = null;
+ if (settings.PersisterSpecificSettings.TryGetValue(BodyWriterBatchTimeoutKey, out var timeoutValue)
+ && TimeSpan.TryParse(timeoutValue, out var parsedTimeout))
+ {
+ bodyWriterBatchTimeout = parsedTimeout;
+ }
+
+ return new MongoSettings(
+ connectionString,
+ databaseName,
+ settings.AuditRetentionPeriod,
+ settings.EnableFullTextSearchOnBodies,
+ settings.MaxBodySizeToStore,
+ bodyStorageType,
+ bodyStoragePath,
+ bodyWriterBatchSize,
+ bodyWriterParallelWriters,
+ bodyWriterBatchTimeout);
+ }
+ }
+}
diff --git a/src/ServiceControl.Audit.Persistence.MongoDB/MongoPersistenceLifecycle.cs b/src/ServiceControl.Audit.Persistence.MongoDB/MongoPersistenceLifecycle.cs
new file mode 100644
index 0000000000..8f523ca8ac
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.MongoDB/MongoPersistenceLifecycle.cs
@@ -0,0 +1,102 @@
+namespace ServiceControl.Audit.Persistence.MongoDB
+{
+ using System.Threading;
+ using System.Threading.Tasks;
+ using global::MongoDB.Bson;
+ using global::MongoDB.Bson.Serialization;
+ using global::MongoDB.Bson.Serialization.Serializers;
+ using Indexes;
+ using Microsoft.Extensions.Logging;
+
+ /// <summary>
+ /// Manages the lifecycle of the MongoDB persistence layer.
+ /// Handles initialization, connectivity verification, and shutdown.
+ /// </summary>
+ class MongoPersistenceLifecycle(
+ MongoClientProvider clientProvider,
+ MongoSettings settings,
+ IndexInitializer indexInitializer,
+ ILogger<MongoPersistenceLifecycle> logger) : IMongoPersistenceLifecycle
+ {
+ static bool serializersRegistered;
+ static readonly object serializerLock = new();
+
+ readonly MongoClientProvider clientProvider = clientProvider;
+ readonly MongoSettings settings = settings;
+ readonly IndexInitializer indexInitializer = indexInitializer;
+ readonly ILogger logger = logger;
+
+ public async Task Initialize(CancellationToken cancellationToken = default)
+ {
+ RegisterSerializers();
+ logger.LogInformation("Initializing MongoDB persistence for database '{DatabaseName}'", settings.DatabaseName);
+ logger.LogInformation("MongoDB settings: AuditRetentionPeriod={AuditRetentionPeriod}, BodyStorageType={BodyStorageType}, BodyStoragePath={BodyStoragePath}, EnableFullTextSearchOnBodies={EnableFullTextSearchOnBodies}, MaxBodySizeToStore={MaxBodySizeToStore}",
+ settings.AuditRetentionPeriod,
+ settings.BodyStorageType,
+ settings.BodyStoragePath ?? "(not set)",
+ settings.EnableFullTextSearchOnBodies,
+ settings.MaxBodySizeToStore);
+ logger.LogInformation("Body writer settings: BatchSize={BodyWriterBatchSize}, ParallelWriters={BodyWriterParallelWriters}, BatchTimeout={BodyWriterBatchTimeout}",
+ settings.BodyWriterBatchSize,
+ settings.BodyWriterParallelWriters,
+ settings.BodyWriterBatchTimeout);
+
+ // Initialize the client and detect product capabilities
+ await clientProvider.InitializeAsync(cancellationToken).ConfigureAwait(false);
+
+ // Verify connectivity with a ping
+ await VerifyConnectivity(cancellationToken).ConfigureAwait(false);
+
+ // Create indexes
+ await indexInitializer.CreateIndexes(cancellationToken).ConfigureAwait(false);
+
+ logger.LogInformation(
+ "MongoDB persistence initialized. Product: {ProductName}, Database: {DatabaseName}",
+ clientProvider.ProductCapabilities.ProductName,
+ settings.DatabaseName);
+ }
+
+ public async Task Stop(CancellationToken cancellationToken = default)
+ {
+ logger.LogInformation("Stopping MongoDB persistence");
+ await clientProvider.DisposeAsync().ConfigureAwait(false);
+ logger.LogInformation("MongoDB persistence stopped");
+ }
+
+ async Task VerifyConnectivity(CancellationToken cancellationToken)
+ {
+ logger.LogInformation("Verifying MongoDB connectivity");
+ var command = new BsonDocument("ping", 1);
+ _ = await clientProvider.Database.RunCommandAsync<BsonDocument>(command, cancellationToken: cancellationToken).ConfigureAwait(false);
+ logger.LogInformation("MongoDB connectivity verified");
+ }
+
+ static void RegisterSerializers()
+ {
+ if (serializersRegistered)
+ {
+ return;
+ }
+
+ lock (serializerLock)
+ {
+ if (serializersRegistered)
+ {
+ return;
+ }
+
+ // Register Guid serializer with Standard representation to avoid "GuidRepresentation is Unspecified" errors
+ try
+ {
+ BsonSerializer.RegisterSerializer(new GuidSerializer(GuidRepresentation.Standard));
+ }
+ catch (BsonSerializationException)
+ {
+ // Serializer already registered by another component - this is fine
+ }
+
+ serializersRegistered = true;
+ }
+ }
+ }
+}
diff --git a/src/ServiceControl.Audit.Persistence.MongoDB/MongoPersistenceLifecycleHostedService.cs b/src/ServiceControl.Audit.Persistence.MongoDB/MongoPersistenceLifecycleHostedService.cs
new file mode 100644
index 0000000000..ffc31d3269
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.MongoDB/MongoPersistenceLifecycleHostedService.cs
@@ -0,0 +1,16 @@
+namespace ServiceControl.Audit.Persistence.MongoDB
+{
+ using System.Threading;
+ using System.Threading.Tasks;
+ using Microsoft.Extensions.Hosting;
+
+ /// <summary>
+ /// Hosted service that manages the MongoDB persistence lifecycle.
+ /// </summary>
+ sealed class MongoPersistenceLifecycleHostedService(IMongoPersistenceLifecycle lifecycle) : IHostedService
+ {
+ public Task StartAsync(CancellationToken cancellationToken) => lifecycle.Initialize(cancellationToken);
+
+ public Task StopAsync(CancellationToken cancellationToken) => lifecycle.Stop(cancellationToken);
+ }
+}
diff --git a/src/ServiceControl.Audit.Persistence.MongoDB/MongoSettings.cs b/src/ServiceControl.Audit.Persistence.MongoDB/MongoSettings.cs
new file mode 100644
index 0000000000..328cc9bbb3
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.MongoDB/MongoSettings.cs
@@ -0,0 +1,28 @@
+namespace ServiceControl.Audit.Persistence.MongoDB
+{
+ using System;
+
+ public class MongoSettings(
+ string connectionString,
+ string databaseName,
+ TimeSpan auditRetentionPeriod,
+ bool enableFullTextSearchOnBodies,
+ int maxBodySizeToStore,
+ BodyStorageType bodyStorageType = BodyStorageType.Database,
+ string bodyStoragePath = null,
+ int bodyWriterBatchSize = 500,
+ int bodyWriterParallelWriters = 4,
+ TimeSpan? bodyWriterBatchTimeout = null)
+ {
+ public string ConnectionString { get; } = connectionString;
+ public string DatabaseName { get; } = databaseName;
+ public TimeSpan AuditRetentionPeriod { get; } = auditRetentionPeriod;
+ public bool EnableFullTextSearchOnBodies { get; } = enableFullTextSearchOnBodies;
+ public int MaxBodySizeToStore { get; } = maxBodySizeToStore;
+ public BodyStorageType BodyStorageType { get; } = bodyStorageType;
+ public string BodyStoragePath { get; } = bodyStoragePath;
+ public int BodyWriterBatchSize { get; } = bodyWriterBatchSize;
+ public int BodyWriterParallelWriters { get; } = bodyWriterParallelWriters;
+ public TimeSpan BodyWriterBatchTimeout { get; } = bodyWriterBatchTimeout ?? TimeSpan.FromMilliseconds(500);
+ }
+}
diff --git a/src/ServiceControl.Audit.Persistence.MongoDB/ProductCapabilities/AmazonDocumentDbCapabilities.cs b/src/ServiceControl.Audit.Persistence.MongoDB/ProductCapabilities/AmazonDocumentDbCapabilities.cs
new file mode 100644
index 0000000000..496e2faf05
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.MongoDB/ProductCapabilities/AmazonDocumentDbCapabilities.cs
@@ -0,0 +1,23 @@
+#nullable enable
+
+namespace ServiceControl.Audit.Persistence.MongoDB.ProductCapabilities
+{
+ using System;
+
+ /// <summary>
+ /// Capabilities for Amazon DocumentDB, covering both standard and Elastic
+ /// clusters via the isElasticCluster flag.
+ /// </summary>
+ public class AmazonDocumentDbCapabilities(bool isElasticCluster = false, Version? serverVersion = null) : IMongoProductCapabilities
+ {
+ public bool IsElasticCluster { get; } = isElasticCluster;
+ public string ProductName => IsElasticCluster ? "Amazon DocumentDB (Elastic)" : "Amazon DocumentDB";
+ public Version? ServerVersion { get; } = serverVersion;
+ public bool SupportsMultiCollectionBulkWrite => false;
+ public bool SupportsTextIndexes => true;
+ public bool SupportsTtlIndexes => true;
+ public int MaxDocumentSizeBytes => 16 * 1024 * 1024; // 16MB
+ public bool SupportsFacetAggregation => false;
+ public bool SupportsWiredTigerCacheMetrics => false;
+ }
+}
diff --git a/src/ServiceControl.Audit.Persistence.MongoDB/ProductCapabilities/AzureDocumentDbCapabilities.cs b/src/ServiceControl.Audit.Persistence.MongoDB/ProductCapabilities/AzureDocumentDbCapabilities.cs
new file mode 100644
index 0000000000..b812cf7955
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.MongoDB/ProductCapabilities/AzureDocumentDbCapabilities.cs
@@ -0,0 +1,22 @@
+#nullable enable
+
+namespace ServiceControl.Audit.Persistence.MongoDB.ProductCapabilities
+{
+ using System;
+
+ /// <summary>
+ /// Capabilities for Azure DocumentDB (the product introduced in January 2025).
+ /// Built on PostgreSQL with MongoDB wire protocol compatibility.
+ /// </summary>
+ public class AzureDocumentDbCapabilities(Version? serverVersion = null) : IMongoProductCapabilities
+ {
+ public string ProductName => "Azure DocumentDB";
+ public Version? ServerVersion { get; } = serverVersion;
+ public bool SupportsMultiCollectionBulkWrite => false;
+ public bool SupportsTextIndexes => true;
+ public bool SupportsTtlIndexes => true;
+ public int MaxDocumentSizeBytes => 16 * 1024 * 1024; // 16MB
+ public bool SupportsFacetAggregation => true;
+ public bool SupportsWiredTigerCacheMetrics => false;
+ }
+}
diff --git a/src/ServiceControl.Audit.Persistence.MongoDB/ProductCapabilities/IMongoProductCapabilities.cs b/src/ServiceControl.Audit.Persistence.MongoDB/ProductCapabilities/IMongoProductCapabilities.cs
new file mode 100644
index 0000000000..354cfb8ea7
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.MongoDB/ProductCapabilities/IMongoProductCapabilities.cs
@@ -0,0 +1,70 @@
+#nullable enable
+
+namespace ServiceControl.Audit.Persistence.MongoDB.ProductCapabilities
+{
+ using System;
+
+ /// <summary>
+ /// Known MongoDB versions for feature detection.
+ /// </summary>
+ public static class MongoVersions
+ {
+ /// <summary>
+ /// MongoDB 8.0 - introduces multi-collection bulk write operations.
+ /// </summary>
+ public static readonly Version Version8 = new(8, 0);
+ }
+
+ /// <summary>
+ /// Abstraction for product-specific capabilities across MongoDB-compatible databases.
+ /// Different products (MongoDB, Azure DocumentDB, Amazon DocumentDB) have varying
+ /// feature support levels.
+ /// </summary>
+ public interface IMongoProductCapabilities
+ {
+ /// <summary>
+ /// The name of the MongoDB-compatible product.
+ /// </summary>
+ string ProductName { get; }
+
+ /// <summary>
+ /// The server version, if known. May be null for cloud products where
+ /// version information is not available or meaningful.
+ /// </summary>
+ Version? ServerVersion { get; }
+
+ /// <summary>
+ /// Whether multi-collection bulk write operations are supported.
+ /// </summary>
+ bool SupportsMultiCollectionBulkWrite { get; }
+
+ /// <summary>
+ /// Whether text indexes are supported for full-text search.
+ /// </summary>
+ bool SupportsTextIndexes { get; }
+
+ /// <summary>
+ /// Whether TTL (Time-To-Live) indexes are supported for automatic document expiration.
+ /// </summary>
+ bool SupportsTtlIndexes { get; }
+
+ /// <summary>
+ /// Maximum document size in bytes.
+ /// Standard is 16MB (16,777,216 bytes) for all supported products.
+ /// </summary>
+ int MaxDocumentSizeBytes { get; }
+
+ /// <summary>
+ /// Whether the $facet aggregation stage is supported.
+ /// When $facet is not supported, use multiple queries instead.
+ /// </summary>
+ bool SupportsFacetAggregation { get; }
+
+ /// <summary>
+ /// Whether WiredTiger cache metrics are available via serverStatus.
+ /// When true, dirty cache percentage is used for ingestion backpressure detection.
+ /// When false, write latency tracking is used instead.
+ /// </summary>
+ bool SupportsWiredTigerCacheMetrics { get; }
+ }
+}
diff --git a/src/ServiceControl.Audit.Persistence.MongoDB/ProductCapabilities/MongoDbCommunityCapabilities.cs b/src/ServiceControl.Audit.Persistence.MongoDB/ProductCapabilities/MongoDbCommunityCapabilities.cs
new file mode 100644
index 0000000000..db13ce5a75
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.MongoDB/ProductCapabilities/MongoDbCommunityCapabilities.cs
@@ -0,0 +1,22 @@
+#nullable enable
+
+namespace ServiceControl.Audit.Persistence.MongoDB.ProductCapabilities
+{
+ using System;
+
+ /// <summary>
+ /// Capabilities for MongoDB Community/Enterprise.
+ /// Full feature support.
+ /// </summary>
+ public class MongoDbCommunityCapabilities(Version? serverVersion = null) : IMongoProductCapabilities
+ {
+ public string ProductName => "MongoDB Community";
+ public Version? ServerVersion { get; } = serverVersion;
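+ // Note: when ServerVersion is null, the lifted >= comparison below evaluates
+ // to false, so an unknown server version conservatively reports no support.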
+ public bool SupportsMultiCollectionBulkWrite => ServerVersion >= MongoVersions.Version8;
+ public bool SupportsTextIndexes => true;
+ public bool SupportsTtlIndexes => true;
+ public int MaxDocumentSizeBytes => 16 * 1024 * 1024; // 16MB
+ public bool SupportsFacetAggregation => true;
+ public bool SupportsWiredTigerCacheMetrics => true;
+ }
+}
diff --git a/src/ServiceControl.Audit.Persistence.MongoDB/ProductCapabilities/MongoProductDetector.cs b/src/ServiceControl.Audit.Persistence.MongoDB/ProductCapabilities/MongoProductDetector.cs
new file mode 100644
index 0000000000..d64969100c
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.MongoDB/ProductCapabilities/MongoProductDetector.cs
@@ -0,0 +1,155 @@
+#nullable enable
+
+namespace ServiceControl.Audit.Persistence.MongoDB.ProductCapabilities
+{
+ using System;
+ using System.Threading;
+ using System.Threading.Tasks;
+ using global::MongoDB.Bson;
+ using global::MongoDB.Driver;
+
+ /// <summary>
+ /// Detects the MongoDB-compatible product from connection string or server info.
+ /// </summary>
+ public static class MongoProductDetector
+ {
+ /// <summary>
+ /// Detects the product capabilities by examining the connection string and server build info.
+ /// </summary>
+ /// <exception cref="NotSupportedException">Thrown when Azure Cosmos DB for MongoDB is detected, which is not supported.</exception>
+ public static async Task<IMongoProductCapabilities> DetectAsync(IMongoClient client, string connectionString, CancellationToken cancellationToken = default)
+ {
+ // Check for unsupported products first
+ if (IsAzureCosmosDb(connectionString))
+ {
+ throw new NotSupportedException(
+ "Azure Cosmos DB for MongoDB is not supported due to significant limitations. " +
+ "Please use Azure DocumentDB, Amazon DocumentDB, or MongoDB Community/Enterprise instead.");
+ }
+
+ // Get server build info for version detection
+ var buildInfo = await GetBuildInfoAsync(client, cancellationToken).ConfigureAwait(false);
+ var serverVersion = ParseVersion(buildInfo);
+
+ // TODO: Is there a better way to determine the product type? User could be using a custom host name. Do we need to make this an explicit config option?
+ if (IsAzureDocumentDb(connectionString))
+ {
+ return new AzureDocumentDbCapabilities(serverVersion);
+ }
+
+ if (IsAmazonDocumentDb(connectionString))
+ {
+ var isElastic = IsElasticCluster(buildInfo);
+ return new AmazonDocumentDbCapabilities(isElastic, serverVersion);
+ }
+
+ // Check for MongoDB Enterprise modules
+ if (buildInfo != null && HasEnterpriseModules(buildInfo))
+ {
+ // MongoDB Enterprise - same capabilities as Community
+ return new MongoDbCommunityCapabilities(serverVersion);
+ }
+
+ // Default to MongoDB Community capabilities
+ return new MongoDbCommunityCapabilities(serverVersion);
+ }
+
+ // Azure Cosmos DB for MongoDB (RU-based) - NOT SUPPORTED
+ // Uses .documents.azure.com or .mongo.cosmos.azure.com with port 10255
+ static bool IsAzureCosmosDb(string connectionString) =>
+ connectionString.Contains(".documents.azure.com", StringComparison.OrdinalIgnoreCase) ||
+ connectionString.Contains(".mongo.cosmos.azure.com", StringComparison.OrdinalIgnoreCase);
+
+ // Azure DocumentDB (PostgreSQL-based) uses mongocluster.cosmos.azure.com
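+ // e.g. a hypothetical cluster named "mycluster" would be reached via
+ // mongodb+srv://mycluster.mongocluster.cosmos.azure.com/ and match this check.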
+ static bool IsAzureDocumentDb(string connectionString) =>
+ connectionString.Contains(".mongocluster.cosmos.azure.com", StringComparison.OrdinalIgnoreCase);
+
+ static bool IsAmazonDocumentDb(string connectionString)
+ {
+ // Amazon DocumentDB connection strings contain .docdb.amazonaws.com or docdb-elastic
+ return connectionString.Contains(".docdb.amazonaws.com", StringComparison.OrdinalIgnoreCase) ||
+ connectionString.Contains("docdb-elastic", StringComparison.OrdinalIgnoreCase);
+ }
+
+ static async Task<BsonDocument?> GetBuildInfoAsync(IMongoClient client, CancellationToken cancellationToken)
+ {
+ try
+ {
+ var adminDb = client.GetDatabase("admin");
+ var command = new BsonDocument("buildInfo", 1);
+ return await adminDb.RunCommandAsync<BsonDocument>(command, cancellationToken: cancellationToken).ConfigureAwait(false);
+ }
+ catch
+ {
+ // If we can't get buildInfo, return null
+ return null;
+ }
+ }
+
+ static Version? ParseVersion(BsonDocument? buildInfo)
+ {
+ if (buildInfo == null)
+ {
+ return null;
+ }
+
+ if (!buildInfo.TryGetValue("version", out var versionValue))
+ {
+ return null;
+ }
+
+ var versionString = versionValue.AsString;
+ if (string.IsNullOrEmpty(versionString))
+ {
+ return null;
+ }
+
+ // MongoDB version format is typically "major.minor.patch" or "major.minor.patch-suffix"
+ // Extract just the numeric portion
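+ // e.g. "8.0.4" parses as-is, while "7.0.2-rc0" is trimmed to "7.0.2" first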
+ var dashIndex = versionString.IndexOf('-');
+ if (dashIndex > 0)
+ {
+ versionString = versionString[..dashIndex];
+ }
+
+ return Version.TryParse(versionString, out var version) ? version : null;
+ }
+
+ static bool IsElasticCluster(BsonDocument? buildInfo)
+ {
+ if (buildInfo == null)
+ {
+ return false;
+ }
+
+ // Elastic clusters have different characteristics in buildInfo
+ if (buildInfo.TryGetValue("version", out var version))
+ {
+ var versionString = version.AsString;
+ // Elastic clusters may have specific version patterns
+ // This is a heuristic and may need adjustment
+ return versionString.Contains("elastic", StringComparison.OrdinalIgnoreCase);
+ }
+
+ return false;
+ }
+
+ static bool HasEnterpriseModules(BsonDocument buildInfo)
+ {
+ if (!buildInfo.TryGetValue("modules", out var modules) || !modules.IsBsonArray)
+ {
+ return false;
+ }
+
+ foreach (var module in modules.AsBsonArray)
+ {
+ if (module.AsString.Contains("enterprise", StringComparison.OrdinalIgnoreCase))
+ {
+ return true;
+ }
+ }
+
+ return false;
+ }
+ }
+}
diff --git a/src/ServiceControl.Audit.Persistence.MongoDB/Search/BodySearchEntry.cs b/src/ServiceControl.Audit.Persistence.MongoDB/Search/BodySearchEntry.cs
new file mode 100644
index 0000000000..3bf4f48635
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.MongoDB/Search/BodySearchEntry.cs
@@ -0,0 +1,14 @@
+namespace ServiceControl.Audit.Persistence.MongoDB.Search
+{
+ using System;
+
+ readonly struct BodyEntry
+ {
+ public required string Id { get; init; }
+ public required string ContentType { get; init; }
+ public required int BodySize { get; init; }
+ public string TextBody { get; init; }
+ public byte[] BinaryBody { get; init; }
+ public required DateTime ExpiresAt { get; init; }
+ }
+}
diff --git a/src/ServiceControl.Audit.Persistence.MongoDB/Search/BodySearchIndexer.cs b/src/ServiceControl.Audit.Persistence.MongoDB/Search/BodySearchIndexer.cs
new file mode 100644
index 0000000000..9fb0ce6ce0
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.MongoDB/Search/BodySearchIndexer.cs
@@ -0,0 +1,215 @@
+namespace ServiceControl.Audit.Persistence.MongoDB.Search
+{
+ using System;
+ using System.Collections.Generic;
+ using System.Linq;
+ using System.Threading;
+ using System.Threading.Channels;
+ using System.Threading.Tasks;
+ using Collections;
+ using Documents;
+ using global::MongoDB.Driver;
+ using Microsoft.Extensions.Hosting;
+ using Microsoft.Extensions.Logging;
+
+ class BodyStorageWriter(
+ Channel<BodyEntry> channel,
+ IMongoClientProvider clientProvider,
+ MongoSettings settings,
+ ILogger<BodyStorageWriter> logger)
+ : BackgroundService
+ {
+ readonly int BatchSize = settings.BodyWriterBatchSize;
+ readonly int ParallelWriters = settings.BodyWriterParallelWriters;
+ readonly TimeSpan BatchTimeout = settings.BodyWriterBatchTimeout;
+ const int MaxRetries = 3;
+
+ readonly Channel<List<MessageBodyDocument>> batchChannel = Channel.CreateBounded<List<MessageBodyDocument>>(
+ new BoundedChannelOptions(settings.BodyWriterParallelWriters * 2)
+ {
+ SingleReader = false,
+ SingleWriter = true,
+ AllowSynchronousContinuations = false,
+ FullMode = BoundedChannelFullMode.Wait
+ });
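+
+ // A capacity of two pending batches per writer keeps memory bounded: with
+ // FullMode.Wait the assembler's WriteAsync awaits until a writer frees a slot,
+ // which in turn slows draining of the incoming body channel.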
+
+ protected override async Task ExecuteAsync(CancellationToken stoppingToken)
+ {
+ logger.LogInformation("Body storage writer started ({Writers} writers, batch size {BatchSize})", ParallelWriters, BatchSize);
+
+ var assemblerTask = Task.Run(() => BatchAssemblerLoop(stoppingToken), CancellationToken.None);
+
+ var writerTasks = new Task[ParallelWriters];
+ for (var i = 0; i < ParallelWriters; i++)
+ {
+ var writerId = i;
+ writerTasks[i] = Task.Run(() => WriterLoop(writerId, stoppingToken), CancellationToken.None);
+ }
+
+ try
+ {
+ await Task.WhenAll(writerTasks.Append(assemblerTask)).ConfigureAwait(false);
+ }
+ catch (OperationCanceledException) when (stoppingToken.IsCancellationRequested)
+ {
+ // Expected during shutdown
+ }
+
+ logger.LogInformation("Body storage writer stopped");
+ }
+
+ async Task BatchAssemblerLoop(CancellationToken stoppingToken)
+ {
+ var batch = new List<MessageBodyDocument>(BatchSize);
+
+ try
+ {
+ while (await channel.Reader.WaitToReadAsync(stoppingToken).ConfigureAwait(false))
+ {
+ while (batch.Count < BatchSize && channel.Reader.TryRead(out var entry))
+ {
+ batch.Add(ToDocument(entry));
+ }
+
+ if (batch.Count > 0 && batch.Count < BatchSize)
+ {
+ using var timeoutCts = CancellationTokenSource.CreateLinkedTokenSource(stoppingToken);
+ timeoutCts.CancelAfter(BatchTimeout);
+ try
+ {
+ while (batch.Count < BatchSize)
+ {
+ if (!await channel.Reader.WaitToReadAsync(timeoutCts.Token).ConfigureAwait(false))
+ {
+ break;
+ }
+
+ while (batch.Count < BatchSize && channel.Reader.TryRead(out var entry))
+ {
+ batch.Add(ToDocument(entry));
+ }
+ }
+ }
+ catch (OperationCanceledException) when (!stoppingToken.IsCancellationRequested)
+ {
+ // Timeout expired - dispatch partial batch
+ }
+ }
+
+ if (batch.Count > 0)
+ {
+ await batchChannel.Writer.WriteAsync(batch, stoppingToken).ConfigureAwait(false);
+ batch = new List<MessageBodyDocument>(BatchSize);
+ }
+ }
+ }
+ catch (OperationCanceledException) when (stoppingToken.IsCancellationRequested)
+ {
+ // Shutting down - drain channel into remaining batches
+ while (channel.Reader.TryRead(out var entry))
+ {
+ batch.Add(ToDocument(entry));
+
+ if (batch.Count >= BatchSize)
+ {
+ await batchChannel.Writer.WriteAsync(batch, CancellationToken.None).ConfigureAwait(false);
+ batch = new List<MessageBodyDocument>(BatchSize);
+ }
+ }
+
+ if (batch.Count > 0)
+ {
+ await batchChannel.Writer.WriteAsync(batch, CancellationToken.None).ConfigureAwait(false);
+ }
+ }
+ finally
+ {
+ batchChannel.Writer.Complete();
+ }
+ }
+
+ async Task WriterLoop(int writerId, CancellationToken stoppingToken)
+ {
+ logger.LogDebug("Body writer {WriterId} started", writerId);
+
+ try
+ {
+ // Use CancellationToken.None for FlushBatch so in-flight writes complete
+ // during shutdown. ReadAllAsync(stoppingToken) controls when we stop
+ // accepting new batches.
+ await foreach (var batch in batchChannel.Reader.ReadAllAsync(stoppingToken).ConfigureAwait(false))
+ {
+ await FlushBatch(batch, CancellationToken.None).ConfigureAwait(false);
+ }
+ }
+ catch (OperationCanceledException) when (stoppingToken.IsCancellationRequested)
+ {
+ // Expected during shutdown
+ }
+
+ // Drain any remaining batches after the assembler completes the channel
+ while (batchChannel.Reader.TryRead(out var batch))
+ {
+ await FlushBatchBestEffort(batch).ConfigureAwait(false);
+ }
+
+ logger.LogDebug("Body writer {WriterId} stopped", writerId);
+ }
+
+ async Task FlushBatchBestEffort(List<MessageBodyDocument> batch)
+ {
+ try
+ {
+ await FlushBatch(batch, CancellationToken.None).ConfigureAwait(false);
+ }
+ catch (Exception ex)
+ {
+ logger.LogError(ex, "Failed to flush {Count} body entries during shutdown", batch.Count);
+ }
+ }
+
+ async Task FlushBatch(List<MessageBodyDocument> batch, CancellationToken cancellationToken)
+ {
+ var collection = clientProvider.Database
+ .GetCollection<MessageBodyDocument>(CollectionNames.MessageBodies);
+
+ var writes = batch.Select(doc =>
+ new ReplaceOneModel<MessageBodyDocument>(
+ Builders<MessageBodyDocument>.Filter.Eq(d => d.Id, doc.Id),
+ doc)
+ { IsUpsert = true })
+ .ToList();
+
+ for (var attempt = 1; attempt <= MaxRetries; attempt++)
+ {
+ try
+ {
+ _ = await collection.BulkWriteAsync(writes, new BulkWriteOptions { IsOrdered = false }, cancellationToken).ConfigureAwait(false);
+ logger.LogDebug("Wrote {Count} body entries", batch.Count);
+ return;
+ }
+ catch (Exception ex) when (attempt < MaxRetries && !cancellationToken.IsCancellationRequested)
+ {
+ var delay = TimeSpan.FromSeconds(Math.Pow(2, attempt - 1));
+ logger.LogWarning(ex, "Failed to write {Count} body entries (attempt {Attempt}/{MaxRetries}), retrying in {Delay}s",
+ batch.Count, attempt, MaxRetries, delay.TotalSeconds);
+ await Task.Delay(delay, cancellationToken).ConfigureAwait(false);
+ }
+ catch (Exception ex)
+ {
+ logger.LogError(ex, "Failed to write {Count} body entries after {MaxRetries} attempts", batch.Count, MaxRetries);
+ // Give up on this batch; without this return the loop could spin through
+ // the remaining attempts after a cancellation-triggered failure.
+ return;
+ }
+ }
+ }
+
+ static MessageBodyDocument ToDocument(BodyEntry entry) => new()
+ {
+ Id = entry.Id,
+ ContentType = entry.ContentType,
+ BodySize = entry.BodySize,
+ TextBody = entry.TextBody,
+ BinaryBody = entry.BinaryBody,
+ ExpiresAt = entry.ExpiresAt
+ };
+ }
+}
diff --git a/src/ServiceControl.Audit.Persistence.MongoDB/ServiceControl.Audit.Persistence.MongoDB.csproj b/src/ServiceControl.Audit.Persistence.MongoDB/ServiceControl.Audit.Persistence.MongoDB.csproj
new file mode 100644
index 0000000000..3486e2d0b9
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.MongoDB/ServiceControl.Audit.Persistence.MongoDB.csproj
@@ -0,0 +1,34 @@
+<Project Sdk="Microsoft.NET.Sdk">
+
+ <PropertyGroup>
+ <TargetFramework>net8.0</TargetFramework>
+ </PropertyGroup>
+
+ <ItemGroup>
+ <PackageReference Include="MongoDB.Driver" />
+ </ItemGroup>
+
+ <ItemGroup>
+ <ProjectReference Include="..\ServiceControl.Audit.Persistence\ServiceControl.Audit.Persistence.csproj" />
+ </ItemGroup>
+
+ <ItemGroup>
+ <Content Include="persistence.manifest">
+ <CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
+ </Content>
+ </ItemGroup>
+
+</Project>
diff --git a/src/ServiceControl.Audit.Persistence.MongoDB/UnitOfWork/MongoAuditIngestionUnitOfWork.cs b/src/ServiceControl.Audit.Persistence.MongoDB/UnitOfWork/MongoAuditIngestionUnitOfWork.cs
new file mode 100644
index 0000000000..e91f9849da
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.MongoDB/UnitOfWork/MongoAuditIngestionUnitOfWork.cs
@@ -0,0 +1,329 @@
+namespace ServiceControl.Audit.Persistence.MongoDB.UnitOfWork
+{
+ using System;
+ using System.Collections.Generic;
+ using System.IO;
+ using System.Linq;
+ using System.Threading;
+ using System.Threading.Tasks;
+ using Auditing;
+ using Auditing.BodyStorage;
+ using BodyStorage;
+ using Collections;
+ using Documents;
+ using global::MongoDB.Bson;
+ using global::MongoDB.Driver;
+ using System.Threading.Channels;
+ using Monitoring;
+ using NServiceBus;
+ using Persistence.UnitOfWork;
+ using Search;
+ using ServiceControl.SagaAudit;
+
+ class MongoAuditIngestionUnitOfWork(
+ IMongoClient client,
+ IMongoDatabase database,
+ bool supportsMultiCollectionBulkWrite,
+ TimeSpan auditRetentionPeriod,
+ IBodyStorage bodyStorage,
+ int maxBodySizeToStore,
+ Channel<BodyEntry> bodyChannel = null)
+ : IAuditIngestionUnitOfWork
+ {
+ readonly List<ProcessedMessageDocument> processedMessages = [];
+ readonly List<KnownEndpointDocument> knownEndpoints = [];
+ readonly List<SagaSnapshotDocument> sagaSnapshots = [];
+
+ public async Task RecordProcessedMessage(ProcessedMessage processedMessage, ReadOnlyMemory<byte> body, CancellationToken cancellationToken)
+ {
+ processedMessage.MessageMetadata["ContentLength"] = body.Length;
+
+ // Determine if body should be stored based on size limit and storage type
+ var shouldStoreBody = !body.IsEmpty && body.Length <= maxBodySizeToStore && bodyStorage is not NullBodyStorage;
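+ // e.g. with MaxBodySizeToStore = 102400, a 200 KB body is not stored, though
+ // its ContentLength metadata (set above) is still recorded.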
+
+ if (shouldStoreBody)
+ {
+ processedMessage.MessageMetadata["BodyUrl"] = $"/messages/{processedMessage.Id}/body";
+ var contentType = processedMessage.Headers.GetValueOrDefault(Headers.ContentType, "text/plain");
+
+ if (bodyStorage is MongoBodyStorage)
+ {
+ // Database body storage: enqueue for async write via BodyStorageWriter
+ var textBody = TryGetUtf8String(body);
+ var bodyExpiresAt = DateTime.UtcNow.Add(auditRetentionPeriod);
+
+ if (bodyChannel != null)
+ {
+ await bodyChannel.Writer.WriteAsync(new BodyEntry
+ {
+ Id = processedMessage.Id,
+ ContentType = contentType,
+ BodySize = body.Length,
+ TextBody = textBody,
+ BinaryBody = textBody == null ? body.ToArray() : null,
+ ExpiresAt = bodyExpiresAt
+ }, cancellationToken).ConfigureAwait(false);
+ }
+ }
+ else
+ {
+ // External storage (file system, BLOB, etc.)
+ using var bodyStream = new MemoryStream(body.ToArray());
+ await bodyStorage.Store(processedMessage.Id, contentType, body.Length, bodyStream, cancellationToken).ConfigureAwait(false);
+ }
+ }
+
+ var expiresAt = DateTime.UtcNow.Add(auditRetentionPeriod);
+
+ processedMessages.Add(new ProcessedMessageDocument
+ {
+ Id = processedMessage.Id,
+ UniqueMessageId = processedMessage.UniqueMessageId,
+ MessageMetadata = ConvertToBsonDocument(processedMessage.MessageMetadata),
+ Headers = processedMessage.Headers,
+ ProcessedAt = processedMessage.ProcessedAt,
+ ExpiresAt = expiresAt
+ });
+ }
+
+ // Attempts to decode the body as UTF-8 text. Returns null if the body contains invalid UTF-8 (binary content).
+ static string TryGetUtf8String(ReadOnlyMemory<byte> body)
+ {
+ try
+ {
+ // Use strict UTF-8 encoding that throws on invalid sequences
+ // Default UTF8Encoding silently replaces invalid bytes with replacement characters
+ return StrictUtf8Encoding.GetString(body.Span);
+ }
+ catch
+ {
+ // Body is not valid UTF-8 text (binary content)
+ return null;
+ }
+ }
+
+ // UTF-8 encoding that throws DecoderFallbackException on invalid byte sequences
+ static readonly System.Text.Encoding StrictUtf8Encoding = new System.Text.UTF8Encoding(encoderShouldEmitUTF8Identifier: false, throwOnInvalidBytes: true);
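+
+ // e.g. a JSON payload decodes cleanly and is stored as TextBody, while gzip or
+ // other binary content typically fails strict decoding and falls back to BinaryBody.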
+
+ public Task RecordKnownEndpoint(KnownEndpoint knownEndpoint, CancellationToken cancellationToken)
+ {
+ knownEndpoints.Add(new KnownEndpointDocument
+ {
+ Id = KnownEndpoint.MakeDocumentId(knownEndpoint.Name, knownEndpoint.HostId),
+ Name = knownEndpoint.Name,
+ HostId = knownEndpoint.HostId,
+ Host = knownEndpoint.Host,
+ LastSeen = knownEndpoint.LastSeen,
+ ExpiresAt = DateTime.UtcNow.Add(auditRetentionPeriod)
+ });
+
+ return Task.CompletedTask;
+ }
+
+ public Task RecordSagaSnapshot(SagaSnapshot sagaSnapshot, CancellationToken cancellationToken)
+ {
+ sagaSnapshots.Add(new SagaSnapshotDocument
+ {
+ // TODO: Verify this ID assignment logic. Will the sagaSnapshot.Id ever be null, or is it always null resulting in a new ObjectId being generated every time.
+ Id = sagaSnapshot.Id ?? ObjectId.GenerateNewId().ToString(),
+ SagaId = sagaSnapshot.SagaId,
+ SagaType = sagaSnapshot.SagaType,
+ StartTime = sagaSnapshot.StartTime,
+ FinishTime = sagaSnapshot.FinishTime,
+ Status = sagaSnapshot.Status,
+ StateAfterChange = sagaSnapshot.StateAfterChange,
+ InitiatingMessage = sagaSnapshot.InitiatingMessage != null ? ToDocument(sagaSnapshot.InitiatingMessage) : null,
+ OutgoingMessages = sagaSnapshot.OutgoingMessages.Select(ToDocument).ToList(),
+ Endpoint = sagaSnapshot.Endpoint,
+ ProcessedAt = sagaSnapshot.ProcessedAt,
+ ExpiresAt = DateTime.UtcNow.Add(auditRetentionPeriod)
+ });
+
+ return Task.CompletedTask;
+ }
+
+ public async Task CommitAsync()
+ {
+ if (processedMessages.Count == 0 && knownEndpoints.Count == 0 && sagaSnapshots.Count == 0)
+ {
+ return;
+ }
+
+ if (supportsMultiCollectionBulkWrite)
+ {
+ await CommitWithMultiCollectionBulkWriteAsync().ConfigureAwait(false);
+ }
+ else
+ {
+ await CommitWithParallelBulkWritesAsync().ConfigureAwait(false);
+ }
+ }
+
+ async Task CommitWithMultiCollectionBulkWriteAsync()
+ {
+ var models = new List<BulkWriteModel>();
+ var databaseName = database.DatabaseNamespace.DatabaseName;
+
+ foreach (var doc in processedMessages)
+ {
+ var ns = new CollectionNamespace(databaseName, CollectionNames.ProcessedMessages);
+ var filter = Builders<ProcessedMessageDocument>.Filter.Eq(d => d.Id, doc.Id);
+ models.Add(new BulkWriteReplaceOneModel<ProcessedMessageDocument>(ns, filter, doc) { IsUpsert = true });
+ }
+
+ foreach (var doc in knownEndpoints)
+ {
+ var ns = new CollectionNamespace(databaseName, CollectionNames.KnownEndpoints);
+ var filter = Builders<KnownEndpointDocument>.Filter.Eq(d => d.Id, doc.Id);
+ models.Add(new BulkWriteReplaceOneModel<KnownEndpointDocument>(ns, filter, doc) { IsUpsert = true });
+ }
+
+ foreach (var doc in sagaSnapshots)
+ {
+ var ns = new CollectionNamespace(databaseName, CollectionNames.SagaSnapshots);
+ var filter = Builders<SagaSnapshotDocument>.Filter.Eq(d => d.Id, doc.Id);
+ models.Add(new BulkWriteReplaceOneModel<SagaSnapshotDocument>(ns, filter, doc) { IsUpsert = true });
+ }
+
+ _ = await client.BulkWriteAsync(models, new ClientBulkWriteOptions { IsOrdered = false }).ConfigureAwait(false);
+ }
+
+ async Task CommitWithParallelBulkWritesAsync()
+ {
+ var tasks = new List<Task>(3);
+
+ if (processedMessages.Count > 0)
+ {
+ tasks.Add(BulkUpsertAsync(
+ database.GetCollection<ProcessedMessageDocument>(CollectionNames.ProcessedMessages),
+ processedMessages,
+ doc => doc.Id));
+ }
+
+ if (knownEndpoints.Count > 0)
+ {
+ tasks.Add(BulkUpsertAsync(
+ database.GetCollection<KnownEndpointDocument>(CollectionNames.KnownEndpoints),
+ knownEndpoints,
+ doc => doc.Id));
+ }
+
+ if (sagaSnapshots.Count > 0)
+ {
+ tasks.Add(BulkUpsertAsync(
+ database.GetCollection<SagaSnapshotDocument>(CollectionNames.SagaSnapshots),
+ sagaSnapshots,
+ doc => doc.Id));
+ }
+
+ await Task.WhenAll(tasks).ConfigureAwait(false);
+ }
+
+ // This is a slight misuse of the IAsyncDisposable pattern; it works because the AuditPersister
+ // always calls DisposeAsync() in its finally block. The method performs business logic
+ // (flushing batched writes to MongoDB) rather than releasing resources.
+ // TODO: A cleaner approach would be an explicit CommitAsync method on the IAuditIngestionUnitOfWork interface.
+ public async ValueTask DisposeAsync() => await CommitAsync().ConfigureAwait(false);
+
+ static async Task BulkUpsertAsync<TDocument>(IMongoCollection<TDocument> collection, List<TDocument> documents, Func<TDocument, string> idSelector)
+ {
+ var writes = documents.Select(doc =>
+ new ReplaceOneModel<TDocument>(
+ Builders<TDocument>.Filter.Eq("_id", idSelector(doc)),
+ doc)
+ { IsUpsert = true });
+
+ _ = await collection.BulkWriteAsync(writes, new BulkWriteOptions { IsOrdered = false }).ConfigureAwait(false);
+ }
+
+ static BsonDocument ConvertToBsonDocument(Dictionary<string, object> dictionary)
+ {
+ var doc = new BsonDocument();
+ foreach (var kvp in dictionary)
+ {
+ doc[kvp.Key] = ConvertToBsonValue(kvp.Value);
+ }
+ return doc;
+ }
+
+ // mostly here to handle special types not natively supported by BsonTypeMapper
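+ // e.g. TimeSpan.FromMinutes(1) becomes the string "00:01:00", a DateTimeOffset
+ // becomes its UtcDateTime, and enum values become their names.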
+ static BsonValue ConvertToBsonValue(object value)
+ {
+ if (value == null)
+ {
+ return BsonNull.Value;
+ }
+
+ // Handle types that need special conversion
+ if (value is TimeSpan ts)
+ {
+ return ts.ToString();
+ }
+
+ if (value is DateTimeOffset dto)
+ {
+ return dto.UtcDateTime;
+ }
+
+ // Guids - convert to string to avoid BinaryData serialization issues
+ // This is also consistent with how RavenDB stores Guids in metadata
+ if (value is Guid guid)
+ {
+ return guid.ToString();
+ }
+
+ // Enums - convert to string for readability
+ if (value.GetType().IsEnum)
+ {
+ return value.ToString();
+ }
+
+ // Nested dictionaries need recursive conversion to handle special types like TimeSpan
+ if (value is IDictionary<string, object> dict)
+ {
+ return ConvertToBsonDocument(dict as Dictionary<string, object> ?? new Dictionary<string, object>(dict));
+ }
+
+ // Lists/Arrays - recursively convert items to handle special types
+ if (value is System.Collections.IEnumerable enumerable and not string and not byte[])
+ {
+ var array = new BsonArray();
+ foreach (var item in enumerable)
+ {
+ array.Add(ConvertToBsonValue(item));
+ }
+ return array;
+ }
+
+ // Try BsonTypeMapper for natively supported types (primitives, string, DateTime, byte[], etc.)
+ if (BsonTypeMapper.TryMapToBsonValue(value, out var bsonValue))
+ {
+ return bsonValue;
+ }
+
+ // Complex objects (like EndpointDetails) - serialize to BsonDocument
+ return value.ToBsonDocument(value.GetType());
+ }
+
+ static InitiatingMessageDocument ToDocument(InitiatingMessage msg) => new()
+ {
+ MessageId = msg.MessageId,
+ MessageType = msg.MessageType,
+ IsSagaTimeoutMessage = msg.IsSagaTimeoutMessage,
+ OriginatingMachine = msg.OriginatingMachine,
+ OriginatingEndpoint = msg.OriginatingEndpoint,
+ TimeSent = msg.TimeSent,
+ Intent = msg.Intent
+ };
+
+ static ResultingMessageDocument ToDocument(ResultingMessage msg) => new()
+ {
+ MessageId = msg.MessageId,
+ MessageType = msg.MessageType,
+ Destination = msg.Destination,
+ TimeSent = msg.TimeSent,
+ Intent = msg.Intent,
+ DeliveryDelay = msg.DeliveryDelay?.ToString(),
+ DeliverAt = msg.DeliverAt
+ };
+ }
+}
diff --git a/src/ServiceControl.Audit.Persistence.MongoDB/UnitOfWork/MongoAuditIngestionUnitOfWorkFactory.cs b/src/ServiceControl.Audit.Persistence.MongoDB/UnitOfWork/MongoAuditIngestionUnitOfWorkFactory.cs
new file mode 100644
index 0000000000..d6177423dd
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.MongoDB/UnitOfWork/MongoAuditIngestionUnitOfWorkFactory.cs
@@ -0,0 +1,34 @@
+namespace ServiceControl.Audit.Persistence.MongoDB.UnitOfWork
+{
+ using System.Threading;
+ using System.Threading.Channels;
+ using System.Threading.Tasks;
+ using Auditing.BodyStorage;
+ using Persistence.UnitOfWork;
+ using Search;
+
+ class MongoAuditIngestionUnitOfWorkFactory(
+ IMongoClientProvider clientProvider,
+ MongoSettings settings,
+ IBodyStorage bodyStorage,
+ MinimumRequiredStorageState storageState,
+ Channel<BodyEntry> bodyChannel = null)
+ : IAuditIngestionUnitOfWorkFactory
+ {
+ public ValueTask<IAuditIngestionUnitOfWork> StartNew(int batchSize, CancellationToken cancellationToken)
+ {
+ var unitOfWork = new MongoAuditIngestionUnitOfWork(
+ clientProvider.Client,
+ clientProvider.Database,
+ clientProvider.ProductCapabilities.SupportsMultiCollectionBulkWrite,
+ settings.AuditRetentionPeriod,
+ bodyStorage,
+ settings.MaxBodySizeToStore,
+ bodyChannel);
+
+ return ValueTask.FromResult<IAuditIngestionUnitOfWork>(unitOfWork);
+ }
+
+ public bool CanIngestMore() => storageState.CanIngestMore;
+ }
+}
diff --git a/src/ServiceControl.Audit.Persistence.MongoDB/persistence.manifest b/src/ServiceControl.Audit.Persistence.MongoDB/persistence.manifest
new file mode 100644
index 0000000000..9f6c0311c8
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.MongoDB/persistence.manifest
@@ -0,0 +1,13 @@
+{
+ "Name": "MongoDB",
+ "DisplayName": "MongoDB",
+ "Description": "MongoDB ServiceControl Audit persister (supports MongoDB, Azure DocumentDB, Amazon DocumentDB)",
+ "AssemblyName": "ServiceControl.Audit.Persistence.MongoDB",
+ "TypeName": "ServiceControl.Audit.Persistence.MongoDB.MongoPersistenceConfiguration, ServiceControl.Audit.Persistence.MongoDB",
+ "Settings": [
+ {
+ "Name": "Database/ConnectionString",
+ "Mandatory": true
+ }
+ ]
+}
diff --git a/src/ServiceControl.Audit.Persistence.Tests.MongoDB/AmazonDocumentDb/AuditDataStoreTests.cs b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/AmazonDocumentDb/AuditDataStoreTests.cs
new file mode 100644
index 0000000000..d7f1a6e114
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/AmazonDocumentDb/AuditDataStoreTests.cs
@@ -0,0 +1,17 @@
+namespace ServiceControl.Audit.Persistence.Tests.MongoDB.AmazonDocumentDb
+{
+ using NUnit.Framework;
+ using Infrastructure;
+ using Shared;
+
+ /// <summary>
+ /// AuditDataStore tests for Amazon DocumentDB.
+ /// Requires the AWS_DOCUMENTDB_CONNECTION_STRING environment variable to be set.
+ /// </summary>
+ [TestFixture]
+ [Category("AmazonDocumentDb")]
+ class AuditDataStoreTests : AuditDataStoreTestsBase
+ {
+ protected override IMongoTestEnvironment CreateEnvironment() => new AmazonDocumentDbEnvironment();
+ }
+}
diff --git a/src/ServiceControl.Audit.Persistence.Tests.MongoDB/AmazonDocumentDb/BodyStorageTests.cs b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/AmazonDocumentDb/BodyStorageTests.cs
new file mode 100644
index 0000000000..3b6e965077
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/AmazonDocumentDb/BodyStorageTests.cs
@@ -0,0 +1,17 @@
+namespace ServiceControl.Audit.Persistence.Tests.MongoDB.AmazonDocumentDb
+{
+ using Infrastructure;
+ using NUnit.Framework;
+ using Shared;
+
+ /// <summary>
+ /// Body storage tests for Amazon DocumentDB.
+ /// Requires the AWS_DOCUMENTDB_CONNECTION_STRING environment variable to be set.
+ /// </summary>
+ [TestFixture]
+ [Category("AmazonDocumentDb")]
+ class BodyStorageTests : BodyStorageTestsBase
+ {
+ protected override IMongoTestEnvironment CreateEnvironment() => new AmazonDocumentDbEnvironment();
+ }
+}
diff --git a/src/ServiceControl.Audit.Persistence.Tests.MongoDB/AmazonDocumentDb/FailedAuditStorageTests.cs b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/AmazonDocumentDb/FailedAuditStorageTests.cs
new file mode 100644
index 0000000000..d28ed4a92c
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/AmazonDocumentDb/FailedAuditStorageTests.cs
@@ -0,0 +1,17 @@
+namespace ServiceControl.Audit.Persistence.Tests.MongoDB.AmazonDocumentDb
+{
+ using Infrastructure;
+ using NUnit.Framework;
+ using Shared;
+
+ /// <summary>
+ /// FailedAuditStorage tests for Amazon DocumentDB.
+ /// Requires the AWS_DOCUMENTDB_CONNECTION_STRING environment variable to be set.
+ /// </summary>
+ [TestFixture]
+ [Category("AmazonDocumentDb")]
+ class FailedAuditStorageTests : FailedAuditStorageTestsBase
+ {
+ protected override IMongoTestEnvironment CreateEnvironment() => new AmazonDocumentDbEnvironment();
+ }
+}
diff --git a/src/ServiceControl.Audit.Persistence.Tests.MongoDB/AmazonDocumentDb/FullTextSearchTests.cs b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/AmazonDocumentDb/FullTextSearchTests.cs
new file mode 100644
index 0000000000..e0d1e2eed8
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/AmazonDocumentDb/FullTextSearchTests.cs
@@ -0,0 +1,18 @@
+namespace ServiceControl.Audit.Persistence.Tests.MongoDB.AmazonDocumentDb
+{
+ using Infrastructure;
+ using NUnit.Framework;
+ using Shared;
+
+ /// <summary>
+ /// Full-text search tests for Amazon DocumentDB.
+ /// Requires the AWS_DOCUMENTDB_CONNECTION_STRING environment variable to be set.
+ /// Note: Amazon DocumentDB only supports English text search.
+ /// </summary>
+ [TestFixture]
+ [Category("AmazonDocumentDb")]
+ class FullTextSearchTests : FullTextSearchTestsBase
+ {
+ protected override IMongoTestEnvironment CreateEnvironment() => new AmazonDocumentDbEnvironment();
+ }
+}
diff --git a/src/ServiceControl.Audit.Persistence.Tests.MongoDB/AmazonDocumentDb/LifecycleTests.cs b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/AmazonDocumentDb/LifecycleTests.cs
new file mode 100644
index 0000000000..d2eb0d1055
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/AmazonDocumentDb/LifecycleTests.cs
@@ -0,0 +1,28 @@
+namespace ServiceControl.Audit.Persistence.Tests.MongoDB.AmazonDocumentDb
+{
+ using NUnit.Framework;
+ using Infrastructure;
+ using Shared;
+
+ /// <summary>
+ /// Lifecycle tests for Amazon DocumentDB.
+ /// Requires the AWS_DOCUMENTDB_CONNECTION_STRING environment variable to be set.
+ /// Optionally set AWS_DOCUMENTDB_IS_ELASTIC=true for Elastic cluster testing.
+ /// </summary>
+ [TestFixture]
+ [Category("AmazonDocumentDb")]
+ [Explicit("Requires Amazon DocumentDB connection string via AWS_DOCUMENTDB_CONNECTION_STRING environment variable")]
+ class LifecycleTests : LifecycleTestsBase
+ {
+ protected override IMongoTestEnvironment CreateEnvironment() => new AmazonDocumentDbEnvironment();
+
+ [OneTimeSetUp]
+ public void CheckEnvironmentAvailable()
+ {
+ if (!AmazonDocumentDbEnvironment.IsAvailable())
+ {
+ Assert.Ignore("Amazon DocumentDB connection string not configured. Set AWS_DOCUMENTDB_CONNECTION_STRING to run these tests.");
+ }
+ }
+ }
+}
diff --git a/src/ServiceControl.Audit.Persistence.Tests.MongoDB/AmazonDocumentDb/UnitOfWorkTests.cs b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/AmazonDocumentDb/UnitOfWorkTests.cs
new file mode 100644
index 0000000000..fde3c2cf82
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/AmazonDocumentDb/UnitOfWorkTests.cs
@@ -0,0 +1,17 @@
+namespace ServiceControl.Audit.Persistence.Tests.MongoDB.AmazonDocumentDb
+{
+ using NUnit.Framework;
+ using Infrastructure;
+ using Shared;
+
+ /// <summary>
+ /// UnitOfWork tests for Amazon DocumentDB.
+ /// Requires the AWS_DOCUMENTDB_CONNECTION_STRING environment variable to be set.
+ /// </summary>
+ [TestFixture]
+ [Category("AmazonDocumentDb")]
+ class UnitOfWorkTests : UnitOfWorkTestsBase
+ {
+ protected override IMongoTestEnvironment CreateEnvironment() => new AmazonDocumentDbEnvironment();
+ }
+}
diff --git a/src/ServiceControl.Audit.Persistence.Tests.MongoDB/AzureDocumentDb/AuditDataStoreTests.cs b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/AzureDocumentDb/AuditDataStoreTests.cs
new file mode 100644
index 0000000000..6fa8c108bb
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/AzureDocumentDb/AuditDataStoreTests.cs
@@ -0,0 +1,17 @@
+namespace ServiceControl.Audit.Persistence.Tests.MongoDB.AzureDocumentDb
+{
+ using NUnit.Framework;
+ using Infrastructure;
+ using Shared;
+
+ /// <summary>
+ /// AuditDataStore tests for Azure DocumentDB.
+ /// Requires the AZURE_DOCUMENTDB_CONNECTION_STRING environment variable to be set.
+ /// </summary>
+ [TestFixture]
+ [Category("AzureDocumentDb")]
+ class AuditDataStoreTests : AuditDataStoreTestsBase
+ {
+ protected override IMongoTestEnvironment CreateEnvironment() => new AzureDocumentDbEnvironment();
+ }
+}
diff --git a/src/ServiceControl.Audit.Persistence.Tests.MongoDB/AzureDocumentDb/BodyStorageTests.cs b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/AzureDocumentDb/BodyStorageTests.cs
new file mode 100644
index 0000000000..3974dc9d9b
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/AzureDocumentDb/BodyStorageTests.cs
@@ -0,0 +1,17 @@
+namespace ServiceControl.Audit.Persistence.Tests.MongoDB.AzureDocumentDb
+{
+ using Infrastructure;
+ using NUnit.Framework;
+ using Shared;
+
+ /// <summary>
+ /// Body storage tests for Azure DocumentDB.
+ /// Requires the AZURE_DOCUMENTDB_CONNECTION_STRING environment variable to be set.
+ /// </summary>
+ [TestFixture]
+ [Category("AzureDocumentDb")]
+ class BodyStorageTests : BodyStorageTestsBase
+ {
+ protected override IMongoTestEnvironment CreateEnvironment() => new AzureDocumentDbEnvironment();
+ }
+}
diff --git a/src/ServiceControl.Audit.Persistence.Tests.MongoDB/AzureDocumentDb/FailedAuditStorageTests.cs b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/AzureDocumentDb/FailedAuditStorageTests.cs
new file mode 100644
index 0000000000..9023bbddb2
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/AzureDocumentDb/FailedAuditStorageTests.cs
@@ -0,0 +1,17 @@
+namespace ServiceControl.Audit.Persistence.Tests.MongoDB.AzureDocumentDb
+{
+ using Infrastructure;
+ using NUnit.Framework;
+ using Shared;
+
+ /// <summary>
+ /// FailedAuditStorage tests for Azure DocumentDB.
+ /// Requires the AZURE_DOCUMENTDB_CONNECTION_STRING environment variable to be set.
+ /// </summary>
+ [TestFixture]
+ [Category("AzureDocumentDb")]
+ class FailedAuditStorageTests : FailedAuditStorageTestsBase
+ {
+ protected override IMongoTestEnvironment CreateEnvironment() => new AzureDocumentDbEnvironment();
+ }
+}
diff --git a/src/ServiceControl.Audit.Persistence.Tests.MongoDB/AzureDocumentDb/FullTextSearchTests.cs b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/AzureDocumentDb/FullTextSearchTests.cs
new file mode 100644
index 0000000000..7be344cc20
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/AzureDocumentDb/FullTextSearchTests.cs
@@ -0,0 +1,17 @@
+namespace ServiceControl.Audit.Persistence.Tests.MongoDB.AzureDocumentDb
+{
+ using Infrastructure;
+ using NUnit.Framework;
+ using Shared;
+
+ /// <summary>
+ /// Full-text search tests for Azure DocumentDB.
+ /// Requires the AZURE_DOCUMENTDB_CONNECTION_STRING environment variable to be set.
+ /// </summary>
+ [TestFixture]
+ [Category("AzureDocumentDb")]
+ class FullTextSearchTests : FullTextSearchTestsBase
+ {
+ protected override IMongoTestEnvironment CreateEnvironment() => new AzureDocumentDbEnvironment();
+ }
+}
diff --git a/src/ServiceControl.Audit.Persistence.Tests.MongoDB/AzureDocumentDb/LifecycleTests.cs b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/AzureDocumentDb/LifecycleTests.cs
new file mode 100644
index 0000000000..edbe607e39
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/AzureDocumentDb/LifecycleTests.cs
@@ -0,0 +1,27 @@
+namespace ServiceControl.Audit.Persistence.Tests.MongoDB.AzureDocumentDb
+{
+ using NUnit.Framework;
+ using Infrastructure;
+ using Shared;
+
+ /// <summary>
+ /// Lifecycle tests for Azure DocumentDB.
+ /// Requires the AZURE_DOCUMENTDB_CONNECTION_STRING environment variable to be set.
+ /// </summary>
+ [TestFixture]
+ [Category("AzureDocumentDb")]
+ [Explicit("Requires Azure DocumentDB connection string via AZURE_DOCUMENTDB_CONNECTION_STRING environment variable")]
+ class LifecycleTests : LifecycleTestsBase
+ {
+ protected override IMongoTestEnvironment CreateEnvironment() => new AzureDocumentDbEnvironment();
+
+ [OneTimeSetUp]
+ public void CheckEnvironmentAvailable()
+ {
+ if (!AzureDocumentDbEnvironment.IsAvailable())
+ {
+ Assert.Ignore("Azure DocumentDB connection string not configured. Set AZURE_DOCUMENTDB_CONNECTION_STRING to run these tests.");
+ }
+ }
+ }
+}
diff --git a/src/ServiceControl.Audit.Persistence.Tests.MongoDB/AzureDocumentDb/UnitOfWorkTests.cs b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/AzureDocumentDb/UnitOfWorkTests.cs
new file mode 100644
index 0000000000..8e63409cf6
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/AzureDocumentDb/UnitOfWorkTests.cs
@@ -0,0 +1,153 @@
+namespace ServiceControl.Audit.Persistence.Tests.MongoDB.AzureDocumentDb
+{
+ using System;
+ using System.Collections.Generic;
+ using System.Threading;
+ using System.Threading.Tasks;
+ using global::MongoDB.Bson;
+ using global::MongoDB.Driver;
+ using Microsoft.Extensions.DependencyInjection;
+ using Microsoft.Extensions.Hosting;
+ using NUnit.Framework;
+ using ServiceControl.Audit.Auditing;
+ using ServiceControl.Audit.Persistence.MongoDB;
+ using ServiceControl.Audit.Persistence.MongoDB.Collections;
+ using ServiceControl.Audit.Persistence.Monitoring;
+ using ServiceControl.Audit.Persistence.UnitOfWork;
+ using ServiceControl.SagaAudit;
+ using Infrastructure;
+ using Shared;
+
+ /// <summary>
+ /// UnitOfWork tests for Azure DocumentDB.
+ /// Requires the AZURE_DOCUMENTDB_CONNECTION_STRING environment variable to be set.
+ /// </summary>
+ [TestFixture]
+ [Category("AzureDocumentDb")]
+ class UnitOfWorkTests : UnitOfWorkTestsBase
+ {
+ protected override IMongoTestEnvironment CreateEnvironment() => new AzureDocumentDbEnvironment();
+
+ /// <summary>
+ /// Verifies that the parallel bulk write fallback path works correctly for Azure DocumentDB,
+ /// which does not support MongoDB 8.0's multi-collection bulk write feature.
+ /// </summary>
+ [Test]
+ public async Task Should_use_parallel_bulk_writes_for_multi_collection_operations()
+ {
+ // Arrange
+ var databaseName = $"test_parallel_bulk_{Guid.NewGuid():N}";
+ var connectionString = Environment.BuildConnectionString(databaseName);
+
+ var persistenceSettings = new PersistenceSettings(TimeSpan.FromHours(1), true, 100000);
+ persistenceSettings.PersisterSpecificSettings[MongoPersistenceConfiguration.ConnectionStringKey] = connectionString;
+
+ var config = new MongoPersistenceConfiguration();
+ var persistence = config.Create(persistenceSettings);
+
+ var hostBuilder = Host.CreateApplicationBuilder();
+ persistence.AddPersistence(hostBuilder.Services);
+
+ using var host = hostBuilder.Build();
+ await host.StartAsync().ConfigureAwait(false);
+
+ try
+ {
+ var clientProvider = host.Services.GetRequiredService<IMongoClientProvider>();
+ var database = clientProvider.Database;
+ var factory = host.Services.GetRequiredService<IAuditIngestionUnitOfWorkFactory>();
+
+ // Verify capability is false for Azure DocumentDB
+ Assert.That(clientProvider.ProductCapabilities.SupportsMultiCollectionBulkWrite, Is.False,
+ "Azure DocumentDB should not support multi-collection bulk write");
+
+ // Act - Write to all three collections in a single unit of work
+ var unitOfWork = await factory.StartNew(10, CancellationToken.None).ConfigureAwait(false);
+ try
+ {
+ await unitOfWork.RecordProcessedMessage(CreateProcessedMessage("parallel-msg"), default, CancellationToken.None).ConfigureAwait(false);
+ await unitOfWork.RecordKnownEndpoint(CreateKnownEndpoint("ParallelEndpoint"), CancellationToken.None).ConfigureAwait(false);
+ await unitOfWork.RecordSagaSnapshot(CreateSagaSnapshot("ParallelSaga"), CancellationToken.None).ConfigureAwait(false);
+ }
+ finally
+ {
+ await unitOfWork.DisposeAsync().ConfigureAwait(false);
+ }
+
+ // Assert - All collections should have documents
+ var messagesCount = await database.GetCollection<BsonDocument>(CollectionNames.ProcessedMessages)
+ .CountDocumentsAsync(FilterDefinition<BsonDocument>.Empty).ConfigureAwait(false);
+ var endpointsCount = await database.GetCollection<BsonDocument>(CollectionNames.KnownEndpoints)
+ .CountDocumentsAsync(FilterDefinition<BsonDocument>.Empty).ConfigureAwait(false);
+ var sagasCount = await database.GetCollection<BsonDocument>(CollectionNames.SagaSnapshots)
+ .CountDocumentsAsync(FilterDefinition<BsonDocument>.Empty).ConfigureAwait(false);
+
+ Assert.Multiple(() =>
+ {
+ Assert.That(messagesCount, Is.EqualTo(1), "ProcessedMessages should be persisted via parallel bulk write");
+ Assert.That(endpointsCount, Is.EqualTo(1), "KnownEndpoints should be persisted via parallel bulk write");
+ Assert.That(sagasCount, Is.EqualTo(1), "SagaSnapshots should be persisted via parallel bulk write");
+ });
+ }
+ finally
+ {
+ var client = new MongoClient(connectionString);
+ await client.DropDatabaseAsync(databaseName).ConfigureAwait(false);
+ await host.StopAsync().ConfigureAwait(false);
+ }
+ }
+
+ static ProcessedMessage CreateProcessedMessage(string messageId)
+ {
+ var headers = new Dictionary<string, string>
+ {
+ ["NServiceBus.MessageId"] = messageId,
+ ["NServiceBus.ProcessingStarted"] = DateTime.UtcNow.AddSeconds(-1).ToString("O"),
+ ["NServiceBus.ProcessingEnded"] = DateTime.UtcNow.ToString("O"),
+ ["$.diagnostics.originating.hostid"] = Guid.NewGuid().ToString(),
+ ["NServiceBus.ProcessingEndpoint"] = "TestEndpoint"
+ };
+
+ var metadata = new Dictionary<string, object>
+ {
+ ["MessageId"] = messageId,
+ ["MessageType"] = "TestMessage",
+ ["TimeSent"] = DateTime.UtcNow,
+ ["IsSystemMessage"] = false
+ };
+
+ return new ProcessedMessage(headers, metadata);
+ }
+
+ static KnownEndpoint CreateKnownEndpoint(string name) => new()
+ {
+ Name = name,
+ HostId = Guid.NewGuid(),
+ Host = "localhost",
+ LastSeen = DateTime.UtcNow
+ };
+
+ static SagaSnapshot CreateSagaSnapshot(string sagaType) => new()
+ {
+ SagaId = Guid.NewGuid(),
+ SagaType = sagaType,
+ StartTime = DateTime.UtcNow.AddMinutes(-1),
+ FinishTime = DateTime.UtcNow,
+ Status = SagaStateChangeStatus.Updated,
+ StateAfterChange = "{ }",
+ Endpoint = "TestEndpoint",
+ ProcessedAt = DateTime.UtcNow,
+ InitiatingMessage = new InitiatingMessage
+ {
+ MessageId = $"{sagaType}-init-msg",
+ MessageType = "TestMessage",
+ IsSagaTimeoutMessage = false,
+ OriginatingEndpoint = "Sender",
+ OriginatingMachine = "localhost",
+ TimeSent = DateTime.UtcNow.AddMinutes(-1),
+ Intent = "Send"
+ },
+ OutgoingMessages = []
+ };
+ }
+}
diff --git a/src/ServiceControl.Audit.Persistence.Tests.MongoDB/Infrastructure/AmazonDocumentDbEnvironment.cs b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/Infrastructure/AmazonDocumentDbEnvironment.cs
new file mode 100644
index 0000000000..07e968dd9b
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/Infrastructure/AmazonDocumentDbEnvironment.cs
@@ -0,0 +1,74 @@
+namespace ServiceControl.Audit.Persistence.Tests.MongoDB.Infrastructure
+{
+ using System;
+ using System.Threading.Tasks;
+ using global::MongoDB.Driver;
+
+ /// <summary>
+ /// Test environment for Amazon DocumentDB using an external connection string.
+ /// Set the AWS_DOCUMENTDB_CONNECTION_STRING environment variable to run these tests.
+ /// Optionally set AWS_DOCUMENTDB_IS_ELASTIC=true for Elastic cluster testing.
+ /// </summary>
+ public class AmazonDocumentDbEnvironment : IMongoTestEnvironment
+ {
+ const string ConnectionStringEnvVar = "AWS_DOCUMENTDB_CONNECTION_STRING";
+ const string IsElasticEnvVar = "AWS_DOCUMENTDB_IS_ELASTIC";
+
+ string connectionString;
+ bool isElasticCluster;
+
+ public string ProductName => isElasticCluster ? "Amazon DocumentDB (Elastic)" : "Amazon DocumentDB";
+
+ public Task Initialize()
+ {
+ connectionString = Environment.GetEnvironmentVariable(ConnectionStringEnvVar);
+
+ if (string.IsNullOrWhiteSpace(connectionString))
+ {
+ throw new InvalidOperationException(
+ $"Environment variable '{ConnectionStringEnvVar}' is not set. " +
+ "Set this variable to an Amazon DocumentDB connection string to run these tests.");
+ }
+
+ var isElasticValue = Environment.GetEnvironmentVariable(IsElasticEnvVar);
+ isElasticCluster = string.Equals(isElasticValue, "true", StringComparison.OrdinalIgnoreCase);
+
+ return Task.CompletedTask;
+ }
+
+ public string GetConnectionString() => connectionString;
+
+ public string BuildConnectionString(string databaseName)
+ {
+ var builder = new MongoUrlBuilder(connectionString)
+ {
+ DatabaseName = databaseName
+ };
+ return builder.ToString();
+ }
+
+ public ExpectedCapabilities GetExpectedCapabilities() => new()
+ {
+ ProductName = isElasticCluster ? "Amazon DocumentDB (Elastic)" : "Amazon DocumentDB",
+ SupportsMultiCollectionBulkWrite = false, // MongoDB 8.0+ feature, not supported by DocumentDB
+ SupportsGridFS = !isElasticCluster, // Elastic clusters do NOT support GridFS
+ SupportsTextIndexes = true, // English only
+ SupportsTransactions = true, // 1 minute limit
+ SupportsTtlIndexes = true,
+ SupportsChangeStreams = !isElasticCluster, // Elastic clusters do NOT support change streams
+ MaxDocumentSizeBytes = 16 * 1024 * 1024
+ };
+
+ public Task Cleanup()
+ {
+ // No cleanup needed for external service
+ return Task.CompletedTask;
+ }
+
+ /// <summary>
+ /// Checks if the Amazon DocumentDB environment is available.
+ /// </summary>
+ public static bool IsAvailable() =>
+ !string.IsNullOrWhiteSpace(Environment.GetEnvironmentVariable(ConnectionStringEnvVar));
+ }
+}
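IsAvailable() lets fixtures skip cleanly instead of failing Initialize() when no connection string is configured. A minimal sketch of how a fixture could gate itself with NUnit (this guard is illustrative, not part of the diff):

    [OneTimeSetUp]
    public async Task OneTimeSetUp()
    {
        // Skip the whole fixture when no DocumentDB connection string is configured.
        if (!AmazonDocumentDbEnvironment.IsAvailable())
        {
            Assert.Ignore("AWS_DOCUMENTDB_CONNECTION_STRING is not set; skipping Amazon DocumentDB tests.");
        }

        Environment = new AmazonDocumentDbEnvironment();
        await Environment.Initialize().ConfigureAwait(false);
    }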
diff --git a/src/ServiceControl.Audit.Persistence.Tests.MongoDB/Infrastructure/AzureDocumentDbEnvironment.cs b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/Infrastructure/AzureDocumentDbEnvironment.cs
new file mode 100644
index 0000000000..1ddc5e6c3a
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/Infrastructure/AzureDocumentDbEnvironment.cs
@@ -0,0 +1,68 @@
+namespace ServiceControl.Audit.Persistence.Tests.MongoDB.Infrastructure
+{
+ using System;
+ using System.Threading.Tasks;
+ using global::MongoDB.Driver;
+
+ /// <summary>
+ /// Test environment for Azure DocumentDB using an external connection string.
+ /// Set the AZURE_DOCUMENTDB_CONNECTION_STRING environment variable to run these tests.
+ /// </summary>
+ public class AzureDocumentDbEnvironment : IMongoTestEnvironment
+ {
+ const string ConnectionStringEnvVar = "AZURE_DOCUMENTDB_CONNECTION_STRING";
+
+ string connectionString;
+
+ public string ProductName => "Azure DocumentDB";
+
+ public Task Initialize()
+ {
+ connectionString = Environment.GetEnvironmentVariable(ConnectionStringEnvVar);
+
+ if (string.IsNullOrWhiteSpace(connectionString))
+ {
+ throw new InvalidOperationException(
+ $"Environment variable '{ConnectionStringEnvVar}' is not set. " +
+ "Set this variable to an Azure DocumentDB connection string to run these tests.");
+ }
+
+ return Task.CompletedTask;
+ }
+
+ public string GetConnectionString() => connectionString;
+
+ public string BuildConnectionString(string databaseName)
+ {
+ var builder = new MongoUrlBuilder(connectionString)
+ {
+ DatabaseName = databaseName
+ };
+ return builder.ToString();
+ }
+
+ public ExpectedCapabilities GetExpectedCapabilities() => new()
+ {
+ ProductName = "Azure DocumentDB",
+ SupportsMultiCollectionBulkWrite = false, // MongoDB 8.0+ feature, not supported by Azure DocumentDB
+ SupportsGridFS = false, // Azure DocumentDB does not support GridFS
+ SupportsTextIndexes = true, // Uses PostgreSQL TSVector
+ SupportsTransactions = true, // 30 second limit
+ SupportsTtlIndexes = true,
+ SupportsChangeStreams = true,
+ MaxDocumentSizeBytes = 16 * 1024 * 1024
+ };
+
+ public Task Cleanup()
+ {
+ // No cleanup needed for external service
+ return Task.CompletedTask;
+ }
+
+ /// <summary>
+ /// Checks if the Azure DocumentDB environment is available.
+ /// </summary>
+ public static bool IsAvailable() =>
+ !string.IsNullOrWhiteSpace(Environment.GetEnvironmentVariable(ConnectionStringEnvVar));
+ }
+}
diff --git a/src/ServiceControl.Audit.Persistence.Tests.MongoDB/Infrastructure/IMongoTestEnvironment.cs b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/Infrastructure/IMongoTestEnvironment.cs
new file mode 100644
index 0000000000..b7d09c405a
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/Infrastructure/IMongoTestEnvironment.cs
@@ -0,0 +1,57 @@
+namespace ServiceControl.Audit.Persistence.Tests.MongoDB.Infrastructure
+{
+ using System.Threading.Tasks;
+
+ /// <summary>
+ /// Abstracts the test environment for different MongoDB-compatible products.
+ /// Provides connection strings and expected capabilities for each product.
+ /// </summary>
+ public interface IMongoTestEnvironment
+ {
+ /// <summary>
+ /// Gets the name of the product being tested.
+ /// </summary>
+ string ProductName { get; }
+
+ /// <summary>
+ /// Initializes the test environment (e.g., starts a container or validates the connection string).
+ /// </summary>
+ Task Initialize();
+
+ /// <summary>
+ /// Gets the connection string for the test environment.
+ /// </summary>
+ string GetConnectionString();
+
+ /// <summary>
+ /// Builds a connection string with the specified database name.
+ /// </summary>
+ string BuildConnectionString(string databaseName);
+
+ /// <summary>
+ /// Gets the expected product capabilities for assertion.
+ /// </summary>
+ ExpectedCapabilities GetExpectedCapabilities();
+
+ /// <summary>
+ /// Cleans up the test environment.
+ /// </summary>
+ Task Cleanup();
+ }
+
+ /// <summary>
+ /// Expected capabilities for a MongoDB-compatible product.
+ /// Used for test assertions.
+ /// </summary>
+ public class ExpectedCapabilities
+ {
+ public required string ProductName { get; init; }
+ public required bool SupportsMultiCollectionBulkWrite { get; init; }
+ public required bool SupportsGridFS { get; init; }
+ public required bool SupportsTextIndexes { get; init; }
+ public required bool SupportsTransactions { get; init; }
+ public required bool SupportsTtlIndexes { get; init; }
+ public required bool SupportsChangeStreams { get; init; }
+ public required int MaxDocumentSizeBytes { get; init; }
+ }
+}
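These declared expectations are meant to be checked against what the persistence layer detects at runtime (the unit-of-work test below reads clientProvider.ProductCapabilities). A sketch of such an assertion, assuming ProductCapabilities exposes flags mirroring ExpectedCapabilities (only SupportsMultiCollectionBulkWrite is confirmed by this diff):

    // Sketch: compare detected capabilities against the environment's declared expectations.
    var expected = Environment.GetExpectedCapabilities();
    var actual = clientProvider.ProductCapabilities;

    Assert.Multiple(() =>
    {
        Assert.That(actual.SupportsMultiCollectionBulkWrite, Is.EqualTo(expected.SupportsMultiCollectionBulkWrite));
        Assert.That(actual.SupportsGridFS, Is.EqualTo(expected.SupportsGridFS));
        Assert.That(actual.SupportsChangeStreams, Is.EqualTo(expected.SupportsChangeStreams));
    });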
diff --git a/src/ServiceControl.Audit.Persistence.Tests.MongoDB/Infrastructure/MongoDbCommunityEnvironment.cs b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/Infrastructure/MongoDbCommunityEnvironment.cs
new file mode 100644
index 0000000000..19c6f02066
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/Infrastructure/MongoDbCommunityEnvironment.cs
@@ -0,0 +1,48 @@
+namespace ServiceControl.Audit.Persistence.Tests.MongoDB.Infrastructure
+{
+ using System.Threading.Tasks;
+ using global::MongoDB.Driver;
+ using ServiceControl.Audit.Persistence.Tests;
+
+ /// <summary>
+ /// Test environment for MongoDB Community/Enterprise using Docker via Testcontainers.
+ /// </summary>
+ public class MongoDbCommunityEnvironment : IMongoTestEnvironment
+ {
+ public string ProductName => "MongoDB";
+
+ public async Task Initialize()
+ {
+ _ = await SharedMongoDbContainer.GetInstance().ConfigureAwait(false);
+ }
+
+ public string GetConnectionString() => SharedMongoDbContainer.GetConnectionString();
+
+ public string BuildConnectionString(string databaseName)
+ {
+ var builder = new MongoUrlBuilder(GetConnectionString())
+ {
+ DatabaseName = databaseName
+ };
+ return builder.ToString();
+ }
+
+ public ExpectedCapabilities GetExpectedCapabilities() => new()
+ {
+ ProductName = "MongoDB Community",
+ SupportsMultiCollectionBulkWrite = true, // MongoDB 8.0+ feature, container uses 8.0
+ SupportsGridFS = true,
+ SupportsTextIndexes = true,
+ SupportsTransactions = true,
+ SupportsTtlIndexes = true,
+ SupportsChangeStreams = true,
+ MaxDocumentSizeBytes = 16 * 1024 * 1024
+ };
+
+ public Task Cleanup()
+ {
+ // Container is shared across tests and cleaned up at assembly level
+ return Task.CompletedTask;
+ }
+ }
+}
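SharedMongoDbContainer is referenced here but defined elsewhere in the test project. A plausible sketch of such a helper, assuming the Testcontainers.MongoDb package and the mongo:8.0 image mentioned in the capability comment above (an approximation, not the actual implementation):

    using System.Threading;
    using System.Threading.Tasks;
    using Testcontainers.MongoDb;

    static class SharedMongoDbContainer
    {
        static readonly SemaphoreSlim gate = new(1, 1);
        static MongoDbContainer container;

        // Lazily starts a single container shared by every fixture in the run.
        public static async Task<MongoDbContainer> GetInstance()
        {
            await gate.WaitAsync().ConfigureAwait(false);
            try
            {
                if (container == null)
                {
                    container = new MongoDbBuilder().WithImage("mongo:8.0").Build();
                    await container.StartAsync().ConfigureAwait(false);
                }

                return container;
            }
            finally
            {
                gate.Release();
            }
        }

        public static string GetConnectionString() => container.GetConnectionString();
    }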
diff --git a/src/ServiceControl.Audit.Persistence.Tests.MongoDB/MongoDbCommunity/AuditDataStoreTests.cs b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/MongoDbCommunity/AuditDataStoreTests.cs
new file mode 100644
index 0000000000..de9fb09b9d
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/MongoDbCommunity/AuditDataStoreTests.cs
@@ -0,0 +1,16 @@
+namespace ServiceControl.Audit.Persistence.Tests.MongoDB.MongoDbCommunity
+{
+ using NUnit.Framework;
+ using Infrastructure;
+ using Shared;
+
+ /// <summary>
+ /// AuditDataStore tests for MongoDB Community/Enterprise using Docker via Testcontainers.
+ /// </summary>
+ [TestFixture]
+ [Category("MongoDbCommunity")]
+ class AuditDataStoreTests : AuditDataStoreTestsBase
+ {
+ protected override IMongoTestEnvironment CreateEnvironment() => new MongoDbCommunityEnvironment();
+ }
+}
diff --git a/src/ServiceControl.Audit.Persistence.Tests.MongoDB/MongoDbCommunity/BodyStorageTests.cs b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/MongoDbCommunity/BodyStorageTests.cs
new file mode 100644
index 0000000000..f5d4e3bc9d
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/MongoDbCommunity/BodyStorageTests.cs
@@ -0,0 +1,16 @@
+namespace ServiceControl.Audit.Persistence.Tests.MongoDB.MongoDbCommunity
+{
+ using Infrastructure;
+ using NUnit.Framework;
+ using Shared;
+
+ /// <summary>
+ /// Body storage tests for MongoDB Community/Enterprise using Docker via Testcontainers.
+ /// </summary>
+ [TestFixture]
+ [Category("MongoDbCommunity")]
+ class BodyStorageTests : BodyStorageTestsBase
+ {
+ protected override IMongoTestEnvironment CreateEnvironment() => new MongoDbCommunityEnvironment();
+ }
+}
diff --git a/src/ServiceControl.Audit.Persistence.Tests.MongoDB/MongoDbCommunity/FailedAuditStorageTests.cs b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/MongoDbCommunity/FailedAuditStorageTests.cs
new file mode 100644
index 0000000000..70630640e0
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/MongoDbCommunity/FailedAuditStorageTests.cs
@@ -0,0 +1,16 @@
+namespace ServiceControl.Audit.Persistence.Tests.MongoDB.MongoDbCommunity
+{
+ using Infrastructure;
+ using NUnit.Framework;
+ using Shared;
+
+ /// <summary>
+ /// FailedAuditStorage tests for MongoDB Community/Enterprise using Docker via Testcontainers.
+ /// </summary>
+ [TestFixture]
+ [Category("MongoDbCommunity")]
+ class FailedAuditStorageTests : FailedAuditStorageTestsBase
+ {
+ protected override IMongoTestEnvironment CreateEnvironment() => new MongoDbCommunityEnvironment();
+ }
+}
diff --git a/src/ServiceControl.Audit.Persistence.Tests.MongoDB/MongoDbCommunity/FullTextSearchTests.cs b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/MongoDbCommunity/FullTextSearchTests.cs
new file mode 100644
index 0000000000..7a8d00edf9
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/MongoDbCommunity/FullTextSearchTests.cs
@@ -0,0 +1,16 @@
+namespace ServiceControl.Audit.Persistence.Tests.MongoDB.MongoDbCommunity
+{
+ using Infrastructure;
+ using NUnit.Framework;
+ using Shared;
+
+ /// <summary>
+ /// Full-text search tests for MongoDB Community/Enterprise using Docker via Testcontainers.
+ /// </summary>
+ [TestFixture]
+ [Category("MongoDbCommunity")]
+ class FullTextSearchTests : FullTextSearchTestsBase
+ {
+ protected override IMongoTestEnvironment CreateEnvironment() => new MongoDbCommunityEnvironment();
+ }
+}
diff --git a/src/ServiceControl.Audit.Persistence.Tests.MongoDB/MongoDbCommunity/LifecycleTests.cs b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/MongoDbCommunity/LifecycleTests.cs
new file mode 100644
index 0000000000..2a50ea3450
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/MongoDbCommunity/LifecycleTests.cs
@@ -0,0 +1,16 @@
+namespace ServiceControl.Audit.Persistence.Tests.MongoDB.MongoDbCommunity
+{
+ using NUnit.Framework;
+ using Infrastructure;
+ using Shared;
+
+ /// <summary>
+ /// Lifecycle tests for MongoDB Community/Enterprise using Docker via Testcontainers.
+ /// </summary>
+ [TestFixture]
+ [Category("MongoDbCommunity")]
+ class LifecycleTests : LifecycleTestsBase
+ {
+ protected override IMongoTestEnvironment CreateEnvironment() => new MongoDbCommunityEnvironment();
+ }
+}
diff --git a/src/ServiceControl.Audit.Persistence.Tests.MongoDB/MongoDbCommunity/UnitOfWorkTests.cs b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/MongoDbCommunity/UnitOfWorkTests.cs
new file mode 100644
index 0000000000..2dd9a0e855
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/MongoDbCommunity/UnitOfWorkTests.cs
@@ -0,0 +1,151 @@
+namespace ServiceControl.Audit.Persistence.Tests.MongoDB.MongoDbCommunity
+{
+ using System;
+ using System.Collections.Generic;
+ using System.Threading;
+ using System.Threading.Tasks;
+ using global::MongoDB.Bson;
+ using global::MongoDB.Driver;
+ using Microsoft.Extensions.DependencyInjection;
+ using Microsoft.Extensions.Hosting;
+ using NUnit.Framework;
+ using ServiceControl.Audit.Auditing;
+ using ServiceControl.Audit.Persistence.MongoDB;
+ using ServiceControl.Audit.Persistence.MongoDB.Collections;
+ using ServiceControl.Audit.Persistence.Monitoring;
+ using ServiceControl.Audit.Persistence.UnitOfWork;
+ using ServiceControl.SagaAudit;
+ using Infrastructure;
+ using Shared;
+
+ /// <summary>
+ /// UnitOfWork tests for MongoDB Community/Enterprise using Docker via Testcontainers.
+ /// </summary>
+ [TestFixture]
+ [Category("MongoDbCommunity")]
+ class UnitOfWorkTests : UnitOfWorkTestsBase
+ {
+ protected override IMongoTestEnvironment CreateEnvironment() => new MongoDbCommunityEnvironment();
+
+ /// <summary>
+ /// Verifies that the MongoDB 8.0+ multi-collection bulk write feature works correctly.
+ /// </summary>
+ [Test]
+ public async Task Should_use_multi_collection_bulk_write_for_multi_collection_operations()
+ {
+ // Arrange
+ var databaseName = $"test_multi_bulk_{Guid.NewGuid():N}";
+ var connectionString = Environment.BuildConnectionString(databaseName);
+
+ var persistenceSettings = new PersistenceSettings(TimeSpan.FromHours(1), true, 100000);
+ persistenceSettings.PersisterSpecificSettings[MongoPersistenceConfiguration.ConnectionStringKey] = connectionString;
+
+ var config = new MongoPersistenceConfiguration();
+ var persistence = config.Create(persistenceSettings);
+
+ var hostBuilder = Host.CreateApplicationBuilder();
+ persistence.AddPersistence(hostBuilder.Services);
+
+ using var host = hostBuilder.Build();
+ await host.StartAsync().ConfigureAwait(false);
+
+ try
+ {
+ var clientProvider = host.Services.GetRequiredService<IMongoClientProvider>();
+ var database = clientProvider.Database;
+ var factory = host.Services.GetRequiredService<IAuditIngestionUnitOfWorkFactory>();
+
+ // Verify capability is true for MongoDB 8.0+
+ Assert.That(clientProvider.ProductCapabilities.SupportsMultiCollectionBulkWrite, Is.True,
+ "MongoDB 8.0+ should support multi-collection bulk write");
+
+ // Act - Write to all three collections in a single unit of work
+ var unitOfWork = await factory.StartNew(10, CancellationToken.None).ConfigureAwait(false);
+ try
+ {
+ await unitOfWork.RecordProcessedMessage(CreateProcessedMessage("multi-bulk-msg"), default, CancellationToken.None).ConfigureAwait(false);
+ await unitOfWork.RecordKnownEndpoint(CreateKnownEndpoint("MultiBulkEndpoint"), CancellationToken.None).ConfigureAwait(false);
+ await unitOfWork.RecordSagaSnapshot(CreateSagaSnapshot("MultiBulkSaga"), CancellationToken.None).ConfigureAwait(false);
+ }
+ finally
+ {
+ await unitOfWork.DisposeAsync().ConfigureAwait(false);
+ }
+
+ // Assert - All collections should have documents
+ var messagesCount = await database.GetCollection<BsonDocument>(CollectionNames.ProcessedMessages)
+ .CountDocumentsAsync(FilterDefinition<BsonDocument>.Empty).ConfigureAwait(false);
+ var endpointsCount = await database.GetCollection<BsonDocument>(CollectionNames.KnownEndpoints)
+ .CountDocumentsAsync(FilterDefinition<BsonDocument>.Empty).ConfigureAwait(false);
+ var sagasCount = await database.GetCollection<BsonDocument>(CollectionNames.SagaSnapshots)
+ .CountDocumentsAsync(FilterDefinition<BsonDocument>.Empty).ConfigureAwait(false);
+
+ Assert.Multiple(() =>
+ {
+ Assert.That(messagesCount, Is.EqualTo(1), "ProcessedMessages should be persisted via multi-collection bulk write");
+ Assert.That(endpointsCount, Is.EqualTo(1), "KnownEndpoints should be persisted via multi-collection bulk write");
+ Assert.That(sagasCount, Is.EqualTo(1), "SagaSnapshots should be persisted via multi-collection bulk write");
+ });
+ }
+ finally
+ {
+ var client = new MongoClient(connectionString);
+ await client.DropDatabaseAsync(databaseName).ConfigureAwait(false);
+ await host.StopAsync().ConfigureAwait(false);
+ }
+ }
+
+ static ProcessedMessage CreateProcessedMessage(string messageId)
+ {
+ var headers = new Dictionary<string, string>
+ {
+ ["NServiceBus.MessageId"] = messageId,
+ ["NServiceBus.ProcessingStarted"] = DateTime.UtcNow.AddSeconds(-1).ToString("O"),
+ ["NServiceBus.ProcessingEnded"] = DateTime.UtcNow.ToString("O"),
+ ["$.diagnostics.originating.hostid"] = Guid.NewGuid().ToString(),
+ ["NServiceBus.ProcessingEndpoint"] = "TestEndpoint"
+ };
+
+ var metadata = new Dictionary<string, object>
+ {
+ ["MessageId"] = messageId,
+ ["MessageType"] = "TestMessage",
+ ["TimeSent"] = DateTime.UtcNow,
+ ["IsSystemMessage"] = false
+ };
+
+ return new ProcessedMessage(headers, metadata);
+ }
+
+ static KnownEndpoint CreateKnownEndpoint(string name) => new()
+ {
+ Name = name,
+ HostId = Guid.NewGuid(),
+ Host = "localhost",
+ LastSeen = DateTime.UtcNow
+ };
+
+ static SagaSnapshot CreateSagaSnapshot(string sagaType) => new()
+ {
+ SagaId = Guid.NewGuid(),
+ SagaType = sagaType,
+ StartTime = DateTime.UtcNow.AddMinutes(-1),
+ FinishTime = DateTime.UtcNow,
+ Status = SagaStateChangeStatus.Updated,
+ StateAfterChange = "{ }",
+ Endpoint = "TestEndpoint",
+ ProcessedAt = DateTime.UtcNow,
+ InitiatingMessage = new InitiatingMessage
+ {
+ MessageId = $"{sagaType}-init-msg",
+ MessageType = "TestMessage",
+ IsSagaTimeoutMessage = false,
+ OriginatingEndpoint = "Sender",
+ OriginatingMachine = "localhost",
+ TimeSent = DateTime.UtcNow.AddMinutes(-1),
+ Intent = "Send"
+ },
+ OutgoingMessages = []
+ };
+ }
+}
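The multi-collection path exercised above corresponds to the bulkWrite command added in MongoDB 8.0, surfaced in the C# driver 3.x as a client-level (rather than collection-level) bulk write. For orientation, a sketch of calling it directly; the namespaces and documents are illustrative, and the exact API shape should be verified against the driver version pinned in Directory.Packages.props:

    // Sketch: a single client-level bulk write spanning two collections (MongoDB 8.0+, driver 3.x).
    var models = new List<BulkWriteModel>
    {
        new BulkWriteInsertOneModel<BsonDocument>(
            CollectionNamespace.FromFullName("audit.ProcessedMessages"),
            new BsonDocument("_id", "msg-1")),
        new BulkWriteInsertOneModel<BsonDocument>(
            CollectionNamespace.FromFullName("audit.KnownEndpoints"),
            new BsonDocument("_id", "endpoint-1"))
    };

    var result = await client.BulkWriteAsync(models, cancellationToken: CancellationToken.None);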
diff --git a/src/ServiceControl.Audit.Persistence.Tests.MongoDB/NullBodyStorageTests.cs b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/NullBodyStorageTests.cs
new file mode 100644
index 0000000000..c18f48473b
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/NullBodyStorageTests.cs
@@ -0,0 +1,53 @@
+namespace ServiceControl.Audit.Persistence.Tests.MongoDB
+{
+ using System.IO;
+ using System.Text;
+ using System.Threading;
+ using System.Threading.Tasks;
+ using NUnit.Framework;
+ using ServiceControl.Audit.Persistence.MongoDB.BodyStorage;
+
+ /// <summary>
+ /// Unit tests for NullBodyStorage (no MongoDB required).
+ /// </summary>
+ [TestFixture]
+ class NullBodyStorageTests
+ {
+ [Test]
+ public async Task Store_should_complete_without_error()
+ {
+ var storage = new NullBodyStorage();
+ var bodyContent = "test body content";
+
+ using var bodyStream = new MemoryStream(Encoding.UTF8.GetBytes(bodyContent));
+
+ // Should not throw
+ await storage.Store("test-id", "text/plain", (int)bodyStream.Length, bodyStream, CancellationToken.None).ConfigureAwait(false);
+ }
+
+ [Test]
+ public async Task TryFetch_should_return_no_result()
+ {
+ var storage = new NullBodyStorage();
+
+ var result = await storage.TryFetch("any-body-id", CancellationToken.None).ConfigureAwait(false);
+
+ Assert.That(result.HasResult, Is.False, "NullBodyStorage should always return HasResult=false");
+ }
+
+ [Test]
+ public async Task TryFetch_should_return_no_result_even_after_store()
+ {
+ var storage = new NullBodyStorage();
+ var bodyId = "stored-body-id";
+ var bodyContent = "some content";
+
+ using var bodyStream = new MemoryStream(Encoding.UTF8.GetBytes(bodyContent));
+ await storage.Store(bodyId, "text/plain", (int)bodyStream.Length, bodyStream, CancellationToken.None).ConfigureAwait(false);
+
+ var result = await storage.TryFetch(bodyId, CancellationToken.None).ConfigureAwait(false);
+
+ Assert.That(result.HasResult, Is.False, "NullBodyStorage should not store anything");
+ }
+ }
+}
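NullBodyStorage itself is defined in the persistence project rather than in this diff. Given the behavior pinned down by these tests and the IBodyStorage signatures used elsewhere, a minimal implementation consistent with them would be:

    // Sketch consistent with the tests above: accept writes, never return a body.
    class NullBodyStorage : IBodyStorage
    {
        public Task Store(string bodyId, string contentType, int bodySize, Stream bodyStream, CancellationToken cancellationToken)
            => Task.CompletedTask;

        public Task<StreamResult> TryFetch(string bodyId, CancellationToken cancellationToken)
            => Task.FromResult(new StreamResult { HasResult = false });
    }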
diff --git a/src/ServiceControl.Audit.Persistence.Tests.MongoDB/PersistenceTestsConfiguration.cs b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/PersistenceTestsConfiguration.cs
new file mode 100644
index 0000000000..0a76e78b5d
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/PersistenceTestsConfiguration.cs
@@ -0,0 +1,102 @@
+namespace ServiceControl.Audit.Persistence.Tests
+{
+ using System;
+ using System.Threading.Tasks;
+ using global::MongoDB.Driver;
+ using Microsoft.Extensions.DependencyInjection;
+ using Microsoft.Extensions.Hosting;
+ using NServiceBus.CustomChecks;
+ using ServiceControl.Audit.Auditing.BodyStorage;
+ using ServiceControl.Audit.Persistence.MongoDB;
+ using UnitOfWork;
+
+ class PersistenceTestsConfiguration
+ {
+ public IAuditDataStore AuditDataStore { get; private set; }
+
+ public IFailedAuditStorage FailedAuditStorage { get; private set; }
+
+ public IBodyStorage BodyStorage { get; private set; }
+
+ public IAuditIngestionUnitOfWorkFactory AuditIngestionUnitOfWorkFactory { get; private set; }
+
+ public IMongoDatabase MongoDatabase { get; private set; }
+
+ public IServiceProvider ServiceProvider => host.Services;
+
+ public string Name => "MongoDB";
+
+ public async Task Configure(Action<PersistenceSettings> setSettings)
+ {
+ var config = new MongoPersistenceConfiguration();
+ var hostBuilder = Host.CreateApplicationBuilder();
+ var persistenceSettings = new PersistenceSettings(TimeSpan.FromHours(1), true, 100000);
+
+ setSettings(persistenceSettings);
+
+ // Get or start the shared MongoDB container
+ _ = await SharedMongoDbContainer.GetInstance().ConfigureAwait(false);
+ var baseConnectionString = SharedMongoDbContainer.GetConnectionString();
+
+ // Use a unique database name per test to ensure isolation
+ databaseName = $"test_{Guid.NewGuid():N}";
+ var builder = new MongoUrlBuilder(baseConnectionString)
+ {
+ DatabaseName = databaseName
+ };
+
+ persistenceSettings.PersisterSpecificSettings[MongoPersistenceConfiguration.ConnectionStringKey] =
+ builder.ToString();
+
+ var persistence = config.Create(persistenceSettings);
+ persistence.AddPersistence(hostBuilder.Services);
+ persistence.AddInstaller(hostBuilder.Services);
+
+ var assembly = typeof(MongoPersistenceConfiguration).Assembly;
+
+ foreach (var type in assembly.DefinedTypes)
+ {
+ if (type.IsAssignableTo(typeof(ICustomCheck)))
+ {
+ hostBuilder.Services.AddTransient(typeof(ICustomCheck), type);
+ }
+ }
+
+ host = hostBuilder.Build();
+ await host.StartAsync().ConfigureAwait(false);
+
+ AuditDataStore = host.Services.GetRequiredService<IAuditDataStore>();
+ FailedAuditStorage = host.Services.GetRequiredService<IFailedAuditStorage>();
+ BodyStorage = host.Services.GetRequiredService<IBodyStorage>();
+ AuditIngestionUnitOfWorkFactory = host.Services.GetRequiredService<IAuditIngestionUnitOfWorkFactory>();
+
+ var clientProvider = host.Services.GetRequiredService<IMongoClientProvider>();
+ MongoDatabase = clientProvider.Database;
+ }
+
+ public Task CompleteDBOperation()
+ {
+ // MongoDB doesn't have deferred index updates like RavenDB
+ // Operations are immediately visible after write acknowledgment
+ return Task.CompletedTask;
+ }
+
+ public async Task Cleanup()
+ {
+ if (MongoDatabase != null)
+ {
+ // Drop the test database to clean up
+ await MongoDatabase.Client.DropDatabaseAsync(databaseName).ConfigureAwait(false);
+ }
+
+ if (host != null)
+ {
+ await host.StopAsync().ConfigureAwait(false);
+ host.Dispose();
+ }
+ }
+
+ string databaseName;
+ IHost host;
+ }
+}
diff --git a/src/ServiceControl.Audit.Persistence.Tests.MongoDB/ServiceControl.Audit.Persistence.Tests.MongoDB.csproj b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/ServiceControl.Audit.Persistence.Tests.MongoDB.csproj
new file mode 100644
index 0000000000..51aba9a1da
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/ServiceControl.Audit.Persistence.Tests.MongoDB.csproj
@@ -0,0 +1,7 @@
+<Project Sdk="Microsoft.NET.Sdk">
+
+ <PropertyGroup>
+ <TargetFramework>net8.0</TargetFramework>
+ </PropertyGroup>
+
+</Project>
diff --git a/src/ServiceControl.Audit.Persistence.Tests.MongoDB/Shared/AuditDataStoreTestsBase.cs b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/Shared/AuditDataStoreTestsBase.cs
new file mode 100644
index 0000000000..0135fae5d7
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/Shared/AuditDataStoreTestsBase.cs
@@ -0,0 +1,247 @@
+namespace ServiceControl.Audit.Persistence.Tests.MongoDB.Shared
+{
+ using System;
+ using System.Collections.Generic;
+ using System.Threading;
+ using System.Threading.Tasks;
+ using global::MongoDB.Bson;
+ using global::MongoDB.Driver;
+ using Microsoft.Extensions.DependencyInjection;
+ using Microsoft.Extensions.Hosting;
+ using NUnit.Framework;
+ using ServiceControl.Audit.Auditing;
+ using ServiceControl.Audit.Infrastructure;
+ using ServiceControl.Audit.Persistence.MongoDB;
+ using ServiceControl.Audit.Persistence.MongoDB.Collections;
+ using ServiceControl.Audit.Persistence.UnitOfWork;
+ using Infrastructure;
+
+ /// <summary>
+ /// Base class for AuditDataStore tests that can run against different MongoDB-compatible products.
+ /// </summary>
+ public abstract class AuditDataStoreTestsBase
+ {
+ protected IMongoTestEnvironment Environment { get; private set; }
+
+ IHost host;
+ IMongoDatabase database;
+ IAuditDataStore dataStore;
+ IAuditIngestionUnitOfWorkFactory unitOfWorkFactory;
+ string databaseName;
+
+ protected abstract IMongoTestEnvironment CreateEnvironment();
+
+ [OneTimeSetUp]
+ public async Task OneTimeSetUp()
+ {
+ Environment = CreateEnvironment();
+ await Environment.Initialize().ConfigureAwait(false);
+ }
+
+ [OneTimeTearDown]
+ public async Task OneTimeTearDown()
+ {
+ if (Environment != null)
+ {
+ await Environment.Cleanup().ConfigureAwait(false);
+ }
+ }
+
+ [SetUp]
+ public async Task SetUp()
+ {
+ databaseName = $"datastore_test_{Guid.NewGuid():N}";
+ var connectionString = Environment.BuildConnectionString(databaseName);
+
+ var persistenceSettings = new PersistenceSettings(TimeSpan.FromHours(1), true, 100000);
+ persistenceSettings.PersisterSpecificSettings[MongoPersistenceConfiguration.ConnectionStringKey] = connectionString;
+
+ var config = new MongoPersistenceConfiguration();
+ var persistence = config.Create(persistenceSettings);
+
+ var hostBuilder = Host.CreateApplicationBuilder();
+ persistence.AddPersistence(hostBuilder.Services);
+
+ host = hostBuilder.Build();
+ await host.StartAsync().ConfigureAwait(false);
+
+ var clientProvider = host.Services.GetRequiredService<IMongoClientProvider>();
+ database = clientProvider.Database;
+ dataStore = host.Services.GetRequiredService<IAuditDataStore>();
+ unitOfWorkFactory = host.Services.GetRequiredService<IAuditIngestionUnitOfWorkFactory>();
+ }
+
+ [TearDown]
+ public async Task TearDown()
+ {
+ if (database != null)
+ {
+ await database.Client.DropDatabaseAsync(databaseName).ConfigureAwait(false);
+ }
+
+ if (host != null)
+ {
+ await host.StopAsync().ConfigureAwait(false);
+ host.Dispose();
+ }
+ }
+
+ [Test]
+ public async Task Should_query_and_return_messages()
+ {
+ // Insert test messages
+ for (int i = 0; i < 5; i++)
+ {
+ await InsertProcessedMessage($"msg-{i}").ConfigureAwait(false);
+ }
+
+ // Query messages
+ var result = await dataStore.GetMessages(
+ includeSystemMessages: true,
+ pagingInfo: new PagingInfo(1, 50),
+ sortInfo: new SortInfo("processed_at", "desc"),
+ timeSentRange: null,
+ cancellationToken: CancellationToken.None).ConfigureAwait(false);
+
+ Assert.Multiple(() =>
+ {
+ Assert.That(result.Results, Has.Count.EqualTo(5));
+ Assert.That(result.QueryStats.TotalCount, Is.EqualTo(5));
+ });
+ }
+
+ [Test]
+ public async Task Queries_should_use_indexes()
+ {
+ await InsertProcessedMessage("test-msg", endpoint: "TestEndpoint").ConfigureAwait(false);
+
+ // Test endpoint+processedAt compound index
+ var explanation = await RunExplainCommand(
+ CollectionNames.ProcessedMessages,
+ new BsonDocument("messageMetadata.ReceivingEndpoint.Name", "TestEndpoint"),
+ new BsonDocument("processedAt", -1),
+ 50).ConfigureAwait(false);
+
+ AssertIndexUsed(explanation, "endpoint_processedAt");
+ }
+
+ async Task InsertProcessedMessage(string messageId, string endpoint = "TestEndpoint")
+ {
+ var headers = new Dictionary<string, string>
+ {
+ ["NServiceBus.MessageId"] = messageId,
+ ["NServiceBus.ConversationId"] = Guid.NewGuid().ToString(),
+ ["NServiceBus.ProcessingStarted"] = DateTime.UtcNow.AddSeconds(-1).ToString("O"),
+ ["NServiceBus.ProcessingEnded"] = DateTime.UtcNow.ToString("O"),
+ ["$.diagnostics.originating.hostid"] = Guid.NewGuid().ToString(),
+ ["NServiceBus.ProcessingEndpoint"] = endpoint,
+ ["NServiceBus.ContentType"] = "application/json"
+ };
+
+ var endpointDetails = new Dictionary<string, object>
+ {
+ ["Name"] = endpoint,
+ ["HostId"] = Guid.NewGuid(),
+ ["Host"] = "localhost"
+ };
+
+ var metadata = new Dictionary<string, object>
+ {
+ ["MessageId"] = messageId,
+ ["MessageType"] = "TestMessage",
+ ["TimeSent"] = DateTime.UtcNow,
+ ["IsSystemMessage"] = false,
+ ["ConversationId"] = Guid.NewGuid().ToString(),
+ ["ReceivingEndpoint"] = endpointDetails,
+ ["SendingEndpoint"] = endpointDetails,
+ ["ContentLength"] = 0
+ };
+
+ var message = new ProcessedMessage(headers, metadata);
+
+ var unitOfWork = await unitOfWorkFactory.StartNew(1, CancellationToken.None).ConfigureAwait(false);
+ try
+ {
+ await unitOfWork.RecordProcessedMessage(message, default, CancellationToken.None).ConfigureAwait(false);
+ }
+ finally
+ {
+ await unitOfWork.DisposeAsync().ConfigureAwait(false);
+ }
+ }
+
+ async Task<BsonDocument> RunExplainCommand(string collectionName, BsonDocument filter, BsonDocument sort, int? limit)
+ {
+ var findCommand = new BsonDocument
+ {
+ { "find", collectionName },
+ { "filter", filter }
+ };
+
+ if (sort != null)
+ {
+ findCommand["sort"] = sort;
+ }
+
+ if (limit.HasValue)
+ {
+ findCommand["limit"] = limit.Value;
+ }
+
+ var explainCommand = new BsonDocument
+ {
+ { "explain", findCommand },
+ { "verbosity", "queryPlanner" }
+ };
+
+ return await database.RunCommandAsync<BsonDocument>(explainCommand).ConfigureAwait(false);
+ }
+
+ static void AssertIndexUsed(BsonDocument explanation, string expectedIndexName)
+ {
+ var queryPlanner = explanation.GetValue("queryPlanner", null)?.AsBsonDocument;
+ Assert.That(queryPlanner, Is.Not.Null, "No queryPlanner in explain output");
+
+ var winningPlan = queryPlanner.GetValue("winningPlan", null)?.AsBsonDocument;
+ Assert.That(winningPlan, Is.Not.Null, "No winningPlan in explain output");
+
+ var indexName = FindIndexNameInPlan(winningPlan);
+
+ Assert.That(indexName, Is.EqualTo(expectedIndexName),
+ $"Expected query to use index '{expectedIndexName}' but used '{indexName ?? "COLLSCAN"}'");
+ }
+
+ static string FindIndexNameInPlan(BsonDocument plan)
+ {
+ if (plan.TryGetValue("stage", out var stage) && stage.AsString == "IXSCAN")
+ {
+ if (plan.TryGetValue("indexName", out var indexName))
+ {
+ return indexName.AsString;
+ }
+ }
+
+ if (plan.TryGetValue("inputStage", out var inputStage) && inputStage.IsBsonDocument)
+ {
+ return FindIndexNameInPlan(inputStage.AsBsonDocument);
+ }
+
+ if (plan.TryGetValue("inputStages", out var inputStages) && inputStages.IsBsonArray)
+ {
+ foreach (var childStage in inputStages.AsBsonArray)
+ {
+ if (childStage.IsBsonDocument)
+ {
+ var result = FindIndexNameInPlan(childStage.AsBsonDocument);
+ if (result != null)
+ {
+ return result;
+ }
+ }
+ }
+ }
+
+ return null;
+ }
+ }
+}
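FindIndexNameInPlan recurses because the IXSCAN stage typically sits beneath a FETCH (and possibly SORT or LIMIT) stage in the winning plan. A trimmed illustration of the explain output AssertIndexUsed consumes, for a query that won with the endpoint_processedAt index:

    // Trimmed example of the queryPlanner explain output parsed above.
    var example = BsonDocument.Parse("""
    {
        "queryPlanner": {
            "winningPlan": {
                "stage": "FETCH",
                "inputStage": {
                    "stage": "IXSCAN",
                    "indexName": "endpoint_processedAt"
                }
            }
        }
    }
    """);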
diff --git a/src/ServiceControl.Audit.Persistence.Tests.MongoDB/Shared/BodyStorageTestsBase.cs b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/Shared/BodyStorageTestsBase.cs
new file mode 100644
index 0000000000..32dc1daaea
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/Shared/BodyStorageTestsBase.cs
@@ -0,0 +1,323 @@
+namespace ServiceControl.Audit.Persistence.Tests.MongoDB.Shared
+{
+ using System;
+ using System.Collections.Generic;
+ using System.IO;
+ using System.Text;
+ using System.Threading;
+ using System.Threading.Tasks;
+ using global::MongoDB.Bson;
+ using global::MongoDB.Driver;
+ using Microsoft.Extensions.DependencyInjection;
+ using Microsoft.Extensions.Hosting;
+ using NUnit.Framework;
+ using ServiceControl.Audit.Auditing;
+ using ServiceControl.Audit.Auditing.BodyStorage;
+ using ServiceControl.Audit.Persistence.MongoDB;
+ using ServiceControl.Audit.Persistence.MongoDB.Collections;
+ using ServiceControl.Audit.Persistence.UnitOfWork;
+ using Infrastructure;
+
+ /// <summary>
+ /// Base class for body storage tests that can run against different MongoDB-compatible products.
+ /// </summary>
+ public abstract class BodyStorageTestsBase
+ {
+ protected IMongoTestEnvironment Environment { get; private set; }
+
+ IHost host;
+ IMongoDatabase database;
+ string databaseName;
+
+ protected abstract IMongoTestEnvironment CreateEnvironment();
+
+ [OneTimeSetUp]
+ public async Task OneTimeSetUp()
+ {
+ Environment = CreateEnvironment();
+ await Environment.Initialize().ConfigureAwait(false);
+ }
+
+ [OneTimeTearDown]
+ public async Task OneTimeTearDown()
+ {
+ if (Environment != null)
+ {
+ await Environment.Cleanup().ConfigureAwait(false);
+ }
+ }
+
+ [SetUp]
+ public async Task SetUp()
+ {
+ databaseName = $"test_{Guid.NewGuid():N}";
+ var connectionString = Environment.BuildConnectionString(databaseName);
+
+ var persistenceSettings = new PersistenceSettings(TimeSpan.FromHours(1), true, 100000);
+ persistenceSettings.PersisterSpecificSettings[MongoPersistenceConfiguration.ConnectionStringKey] = connectionString;
+
+ var config = new MongoPersistenceConfiguration();
+ var persistence = config.Create(persistenceSettings);
+
+ var hostBuilder = Host.CreateApplicationBuilder();
+ persistence.AddPersistence(hostBuilder.Services);
+
+ host = hostBuilder.Build();
+ await host.StartAsync().ConfigureAwait(false);
+
+ var clientProvider = host.Services.GetRequiredService<IMongoClientProvider>();
+ database = clientProvider.Database;
+ }
+
+ [TearDown]
+ public async Task TearDown()
+ {
+ if (database != null)
+ {
+ await database.Client.DropDatabaseAsync(databaseName).ConfigureAwait(false);
+ }
+
+ if (host != null)
+ {
+ await host.StopAsync().ConfigureAwait(false);
+ host.Dispose();
+ }
+ }
+
+ [Test]
+ public async Task Should_store_and_retrieve_text_body()
+ {
+ var factory = host.Services.GetRequiredService<IAuditIngestionUnitOfWorkFactory>();
+ var bodyStorage = host.Services.GetRequiredService<IBodyStorage>();
+
+ var messageId = "text-body-test";
+ var bodyContent = "{ \"message\": \"Hello, World!\" }";
+ var message = CreateProcessedMessage(messageId, "application/json");
+
+ // Ingest message with body via unit of work
+ await IngestMessage(factory, message, Encoding.UTF8.GetBytes(bodyContent)).ConfigureAwait(false);
+
+ // Retrieve body via IBodyStorage
+ var result = await bodyStorage.TryFetch(messageId, CancellationToken.None).ConfigureAwait(false);
+
+ Assert.Multiple(() =>
+ {
+ Assert.That(result.HasResult, Is.True, "Should find stored body");
+ Assert.That(result.ContentType, Is.EqualTo("application/json"), "Content type should match");
+ Assert.That(result.BodySize, Is.EqualTo(Encoding.UTF8.GetByteCount(bodyContent)), "Body size should match");
+ });
+
+ using var reader = new StreamReader(result.Stream);
+ var retrievedContent = await reader.ReadToEndAsync().ConfigureAwait(false);
+ Assert.That(retrievedContent, Is.EqualTo(bodyContent), "Body content should match");
+ }
+
+ [Test]
+ public async Task Should_store_and_retrieve_binary_body()
+ {
+ var factory = host.Services.GetRequiredService<IAuditIngestionUnitOfWorkFactory>();
+ var bodyStorage = host.Services.GetRequiredService<IBodyStorage>();
+
+ var messageId = "binary-body-test";
+ // Invalid UTF-8 sequence - will be stored as binary, not text
+ var binaryContent = new byte[] { 0x00, 0x01, 0x02, 0xFF, 0xFE, 0xFD };
+ var message = CreateProcessedMessage(messageId, "application/octet-stream");
+
+ // Ingest message with binary body
+ await IngestMessage(factory, message, binaryContent).ConfigureAwait(false);
+
+ // Binary bodies should be retrievable
+ var result = await bodyStorage.TryFetch(messageId, CancellationToken.None).ConfigureAwait(false);
+
+ Assert.Multiple(() =>
+ {
+ Assert.That(result.HasResult, Is.True, "Should find stored binary body");
+ Assert.That(result.ContentType, Is.EqualTo("application/octet-stream"), "Content type should match");
+ Assert.That(result.BodySize, Is.EqualTo(binaryContent.Length), "Body size should match");
+ });
+
+ // Verify content matches
+ using var memoryStream = new MemoryStream();
+ await result.Stream.CopyToAsync(memoryStream).ConfigureAwait(false);
+ Assert.That(memoryStream.ToArray(), Is.EqualTo(binaryContent), "Binary content should match");
+
+ // Verify it's stored in binaryBody field, not body field
+ var collection = database.GetCollection<BsonDocument>(CollectionNames.ProcessedMessages);
+ var doc = await collection.Find(Builders<BsonDocument>.Filter.Eq("_id", messageId)).FirstOrDefaultAsync().ConfigureAwait(false);
+ Assert.That(doc, Is.Not.Null, "Message should be stored");
+ Assert.That(doc.Contains("body") && doc["body"] != BsonNull.Value, Is.False, "Text body field should be null for binary content");
+ Assert.That(doc.Contains("binaryBody") && doc["binaryBody"] != BsonNull.Value, Is.True, "Binary body field should have content");
+ }
+
+ [Test]
+ public async Task Should_return_no_result_for_nonexistent_body()
+ {
+ var bodyStorage = host.Services.GetRequiredService<IBodyStorage>();
+
+ var result = await bodyStorage.TryFetch("nonexistent-body-id", CancellationToken.None).ConfigureAwait(false);
+
+ Assert.That(result.HasResult, Is.False, "Should not find nonexistent body");
+ }
+
+ [Test]
+ public async Task Should_store_body_inline_in_processed_message()
+ {
+ var factory = host.Services.GetRequiredService<IAuditIngestionUnitOfWorkFactory>();
+
+ var messageId = "inline-storage-test";
+ var bodyContent = "{ \"test\": \"inline body storage\" }";
+ var message = CreateProcessedMessage(messageId, "application/json");
+
+ await IngestMessage(factory, message, Encoding.UTF8.GetBytes(bodyContent)).ConfigureAwait(false);
+
+ // Verify body is stored inline in ProcessedMessages collection
+ var collection = database.GetCollection<BsonDocument>(CollectionNames.ProcessedMessages);
+ var doc = await collection.Find(Builders<BsonDocument>.Filter.Eq("_id", messageId)).FirstOrDefaultAsync().ConfigureAwait(false);
+
+ Assert.That(doc, Is.Not.Null, "Message should be stored");
+ Assert.That(doc.Contains("body"), Is.True, "Document should have body field");
+ Assert.That(doc["body"].AsString, Is.EqualTo(bodyContent), "Body should be stored as plain UTF-8 text");
+ }
+
+ [Test]
+ public async Task Should_not_store_body_when_body_storage_type_is_none()
+ {
+ // Arrange - Create a separate host with BodyStorageType.None
+ var testDatabaseName = $"test_nobody_{Guid.NewGuid():N}";
+ var connectionString = Environment.BuildConnectionString(testDatabaseName);
+
+ var persistenceSettings = new PersistenceSettings(TimeSpan.FromHours(1), true, 100000);
+ persistenceSettings.PersisterSpecificSettings[MongoPersistenceConfiguration.ConnectionStringKey] = connectionString;
+ persistenceSettings.PersisterSpecificSettings[MongoPersistenceConfiguration.BodyStorageTypeKey] = "None";
+
+ var config = new MongoPersistenceConfiguration();
+ var persistence = config.Create(persistenceSettings);
+
+ var hostBuilder = Host.CreateApplicationBuilder();
+ persistence.AddPersistence(hostBuilder.Services);
+
+ using var testHost = hostBuilder.Build();
+ await testHost.StartAsync().ConfigureAwait(false);
+
+ try
+ {
+ var clientProvider = testHost.Services.GetRequiredService<IMongoClientProvider>();
+ var testDatabase = clientProvider.Database;
+ var factory = testHost.Services.GetRequiredService<IAuditIngestionUnitOfWorkFactory>();
+ var bodyStorage = testHost.Services.GetRequiredService<IBodyStorage>();
+
+ var messageId = "none-storage-test";
+ var bodyContent = "This body should NOT be stored";
+ var message = CreateProcessedMessage(messageId, "text/plain");
+
+ // Act - Ingest message with body
+ await IngestMessage(factory, message, Encoding.UTF8.GetBytes(bodyContent)).ConfigureAwait(false);
+
+ // Assert - TryFetch should return no result
+ var result = await bodyStorage.TryFetch(messageId, CancellationToken.None).ConfigureAwait(false);
+ Assert.That(result.HasResult, Is.False, "Body should not be retrievable when BodyStorageType is None");
+
+ // Assert - Message should be stored but without body
+ var collection = testDatabase.GetCollection<BsonDocument>(CollectionNames.ProcessedMessages);
+ var doc = await collection.Find(Builders<BsonDocument>.Filter.Eq("_id", messageId)).FirstOrDefaultAsync().ConfigureAwait(false);
+ Assert.That(doc, Is.Not.Null, "Message should be stored");
+ Assert.That(doc.Contains("body") && doc["body"] != BsonNull.Value, Is.False, "Body field should be null when BodyStorageType is None");
+ }
+ finally
+ {
+ var client = new MongoClient(connectionString);
+ await client.DropDatabaseAsync(testDatabaseName).ConfigureAwait(false);
+ await testHost.StopAsync().ConfigureAwait(false);
+ }
+ }
+
+ [Test]
+ public async Task Should_not_store_body_when_body_exceeds_max_size()
+ {
+ // Arrange - Create a host with a small max body size
+ var testDatabaseName = $"test_maxsize_{Guid.NewGuid():N}";
+ var connectionString = Environment.BuildConnectionString(testDatabaseName);
+
+ // Set max body size to 10 bytes
+ var persistenceSettings = new PersistenceSettings(TimeSpan.FromHours(1), true, 10);
+ persistenceSettings.PersisterSpecificSettings[MongoPersistenceConfiguration.ConnectionStringKey] = connectionString;
+
+ var config = new MongoPersistenceConfiguration();
+ var persistence = config.Create(persistenceSettings);
+
+ var hostBuilder = Host.CreateApplicationBuilder();
+ persistence.AddPersistence(hostBuilder.Services);
+
+ using var testHost = hostBuilder.Build();
+ await testHost.StartAsync().ConfigureAwait(false);
+
+ try
+ {
+ var clientProvider = testHost.Services.GetRequiredService<IMongoClientProvider>();
+ var testDatabase = clientProvider.Database;
+ var factory = testHost.Services.GetRequiredService<IAuditIngestionUnitOfWorkFactory>();
+ var bodyStorage = testHost.Services.GetRequiredService<IBodyStorage>();
+
+ var messageId = "large-body-msg";
+ var message = CreateProcessedMessage(messageId, "text/plain");
+ var body = Encoding.UTF8.GetBytes("This body is larger than 10 bytes and should NOT be stored");
+
+ // Act - Ingest a message with a body that exceeds max size
+ await IngestMessage(factory, message, body).ConfigureAwait(false);
+
+ // Assert - Message should be stored, but body should NOT (too large)
+ var collection = testDatabase.GetCollection<BsonDocument>(CollectionNames.ProcessedMessages);
+ var doc = await collection.Find(Builders<BsonDocument>.Filter.Eq("_id", messageId)).FirstOrDefaultAsync().ConfigureAwait(false);
+
+ Assert.That(doc, Is.Not.Null, "Message should be stored");
+ Assert.That(doc.Contains("body") && doc["body"] != BsonNull.Value, Is.False, "Body should NOT be stored when it exceeds max size");
+
+ // Assert - TryFetch should return no result
+ var result = await bodyStorage.TryFetch(messageId, CancellationToken.None).ConfigureAwait(false);
+ Assert.That(result.HasResult, Is.False, "Body should not be retrievable when it exceeds max size");
+ }
+ finally
+ {
+ var client = new MongoClient(connectionString);
+ await client.DropDatabaseAsync(testDatabaseName).ConfigureAwait(false);
+ await testHost.StopAsync().ConfigureAwait(false);
+ }
+ }
+
+ static async Task IngestMessage(IAuditIngestionUnitOfWorkFactory factory, ProcessedMessage message, byte[] body)
+ {
+ var unitOfWork = await factory.StartNew(10, CancellationToken.None).ConfigureAwait(false);
+ try
+ {
+ await unitOfWork.RecordProcessedMessage(message, body, CancellationToken.None).ConfigureAwait(false);
+ }
+ finally
+ {
+ await unitOfWork.DisposeAsync().ConfigureAwait(false);
+ }
+ }
+
+ static ProcessedMessage CreateProcessedMessage(string messageId, string contentType = "text/plain")
+ {
+ var headers = new Dictionary<string, string>
+ {
+ ["NServiceBus.MessageId"] = messageId,
+ ["NServiceBus.ContentType"] = contentType,
+ ["NServiceBus.ProcessingStarted"] = DateTime.UtcNow.AddSeconds(-1).ToString("O"),
+ ["NServiceBus.ProcessingEnded"] = DateTime.UtcNow.ToString("O"),
+ ["$.diagnostics.originating.hostid"] = Guid.NewGuid().ToString(),
+ ["NServiceBus.ProcessingEndpoint"] = "TestEndpoint"
+ };
+
+ var metadata = new Dictionary<string, object>
+ {
+ ["MessageId"] = messageId,
+ ["MessageType"] = "TestMessage",
+ ["TimeSent"] = DateTime.UtcNow,
+ ["IsSystemMessage"] = false
+ };
+
+ return new ProcessedMessage(headers, metadata) { Id = messageId };
+ }
+ }
+}
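The binary-body test above relies on the persister classifying payloads by UTF-8 validity: valid text lands in the body field, anything else in binaryBody. The classification code is not part of this diff; on .NET 8, one way to make that decision, as a sketch:

    using System.Text;
    using System.Text.Unicode;

    // Sketch: valid UTF-8 is stored as text, everything else as raw bytes.
    static (string textBody, byte[] binaryBody) ClassifyBody(byte[] body) =>
        Utf8.IsValid(body)
            ? (Encoding.UTF8.GetString(body), null)
            : (null, body);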
diff --git a/src/ServiceControl.Audit.Persistence.Tests.MongoDB/Shared/FailedAuditStorageTestsBase.cs b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/Shared/FailedAuditStorageTestsBase.cs
new file mode 100644
index 0000000000..8f8ee41d3a
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/Shared/FailedAuditStorageTestsBase.cs
@@ -0,0 +1,105 @@
+namespace ServiceControl.Audit.Persistence.Tests.MongoDB.Shared
+{
+ using System;
+ using System.Collections.Generic;
+ using System.Threading.Tasks;
+ using Microsoft.Extensions.DependencyInjection;
+ using Microsoft.Extensions.Hosting;
+ using NUnit.Framework;
+ using ServiceControl.Audit.Auditing;
+ using ServiceControl.Audit.Persistence.MongoDB;
+ using Infrastructure;
+
+ /// <summary>
+ /// Base class for FailedAuditStorage tests that can run against different MongoDB-compatible products.
+ /// </summary>
+ public abstract class FailedAuditStorageTestsBase
+ {
+ protected IMongoTestEnvironment Environment { get; private set; }
+
+ IHost host;
+ string databaseName;
+ IFailedAuditStorage failedAuditStorage;
+
+ protected abstract IMongoTestEnvironment CreateEnvironment();
+
+ [OneTimeSetUp]
+ public async Task OneTimeSetUp()
+ {
+ Environment = CreateEnvironment();
+ await Environment.Initialize().ConfigureAwait(false);
+ }
+
+ [OneTimeTearDown]
+ public async Task OneTimeTearDown()
+ {
+ if (Environment != null)
+ {
+ await Environment.Cleanup().ConfigureAwait(false);
+ }
+ }
+
+ [SetUp]
+ public async Task SetUp()
+ {
+ databaseName = $"test_{Guid.NewGuid():N}";
+ var connectionString = Environment.BuildConnectionString(databaseName);
+
+ var persistenceSettings = new PersistenceSettings(TimeSpan.FromHours(1), true, 100000);
+ persistenceSettings.PersisterSpecificSettings[MongoPersistenceConfiguration.ConnectionStringKey] = connectionString;
+
+ var config = new MongoPersistenceConfiguration();
+ var persistence = config.Create(persistenceSettings);
+
+ var hostBuilder = Host.CreateApplicationBuilder();
+ persistence.AddPersistence(hostBuilder.Services);
+
+ host = hostBuilder.Build();
+ await host.StartAsync().ConfigureAwait(false);
+
+ failedAuditStorage = host.Services.GetRequiredService<IFailedAuditStorage>();
+ }
+
+ [TearDown]
+ public async Task TearDown()
+ {
+ if (host != null)
+ {
+ var clientProvider = host.Services.GetRequiredService<IMongoClientProvider>();
+ await clientProvider.Database.Client.DropDatabaseAsync(databaseName).ConfigureAwait(false);
+ await host.StopAsync().ConfigureAwait(false);
+ host.Dispose();
+ }
+ }
+
+ [Test]
+ public async Task Should_save_failed_audit_import()
+ {
+ var failedImport = CreateFailedAuditImport("test-msg-1");
+
+ await failedAuditStorage.SaveFailedAuditImport(failedImport).ConfigureAwait(false);
+
+ var count = await failedAuditStorage.GetFailedAuditsCount().ConfigureAwait(false);
+ Assert.That(count, Is.EqualTo(1));
+ }
+
+ static FailedAuditImport CreateFailedAuditImport(string messageId)
+ {
+ return new FailedAuditImport
+ {
+ Id = $"FailedAuditImports/{Guid.NewGuid()}",
+ Message = new FailedTransportMessage
+ {
+ Id = messageId,
+ Headers = new Dictionary<string, string>
+ {
+ ["NServiceBus.MessageId"] = messageId,
+ ["NServiceBus.EnclosedMessageTypes"] = "TestMessage"
+ },
+ Body = System.Text.Encoding.UTF8.GetBytes($"Body for {messageId}")
+ },
+ ExceptionInfo = $"Exception for {messageId}"
+ };
+ }
+ }
+}
diff --git a/src/ServiceControl.Audit.Persistence.Tests.MongoDB/Shared/FullTextSearchTestsBase.cs b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/Shared/FullTextSearchTestsBase.cs
new file mode 100644
index 0000000000..0966867a72
--- /dev/null
+++ b/src/ServiceControl.Audit.Persistence.Tests.MongoDB/Shared/FullTextSearchTestsBase.cs
@@ -0,0 +1,344 @@
+namespace ServiceControl.Audit.Persistence.Tests.MongoDB.Shared
+{
+ using System;
+ using System.Collections.Generic;
+ using System.Text;
+ using System.Threading;
+ using System.Threading.Tasks;
+ using Microsoft.Extensions.DependencyInjection;
+ using Microsoft.Extensions.Hosting;
+ using NUnit.Framework;
+ using ServiceControl.Audit.Auditing;
+ using ServiceControl.Audit.Infrastructure;
+ using ServiceControl.Audit.Monitoring;
+ using ServiceControl.Audit.Persistence.MongoDB;
+ using ServiceControl.Audit.Persistence.UnitOfWork;
+ using Infrastructure;
+
+ /// <summary>