diff --git a/Microsoft.Azure.Cosmos/src/ChangeFeed/ChangeFeedCrossFeedRangeAsyncEnumerable.cs b/Microsoft.Azure.Cosmos/src/ChangeFeed/ChangeFeedCrossFeedRangeAsyncEnumerable.cs index 369202892c..9823538fba 100644 --- a/Microsoft.Azure.Cosmos/src/ChangeFeed/ChangeFeedCrossFeedRangeAsyncEnumerable.cs +++ b/Microsoft.Azure.Cosmos/src/ChangeFeed/ChangeFeedCrossFeedRangeAsyncEnumerable.cs @@ -10,8 +10,9 @@ namespace Microsoft.Azure.Cosmos.ChangeFeed using Microsoft.Azure.Cosmos.ChangeFeed.Pagination; using Microsoft.Azure.Cosmos.Pagination; using Microsoft.Azure.Cosmos.Query.Core.Monads; - using Microsoft.Azure.Cosmos.Serializer; - + using Microsoft.Azure.Cosmos.Serializer; + using Microsoft.Azure.Cosmos.Tracing; + internal sealed class ChangeFeedCrossFeedRangeAsyncEnumerable : IAsyncEnumerable> { private readonly IDocumentContainer documentContainer; @@ -37,12 +38,13 @@ public IAsyncEnumerator> GetAsyncEnumerator(Cancellatio CrossPartitionChangeFeedAsyncEnumerator innerEnumerator = CrossPartitionChangeFeedAsyncEnumerator.Create( this.documentContainer, innerState, - this.changeFeedPaginationOptions, - cancellationToken); + this.changeFeedPaginationOptions); - return new ChangeFeedCrossFeedRangeAsyncEnumerator( + ChangeFeedCrossFeedRangeAsyncEnumerator changeFeedEnumerator = new ChangeFeedCrossFeedRangeAsyncEnumerator( innerEnumerator, - this.jsonSerializationFormatOptions); + this.jsonSerializationFormatOptions); + + return new TracingAsyncEnumerator>(changeFeedEnumerator, NoOpTrace.Singleton, cancellationToken); } } } diff --git a/Microsoft.Azure.Cosmos/src/ChangeFeed/ChangeFeedCrossFeedRangeAsyncEnumerator.cs b/Microsoft.Azure.Cosmos/src/ChangeFeed/ChangeFeedCrossFeedRangeAsyncEnumerator.cs index 7186cbb057..b964b4e7e4 100644 --- a/Microsoft.Azure.Cosmos/src/ChangeFeed/ChangeFeedCrossFeedRangeAsyncEnumerator.cs +++ b/Microsoft.Azure.Cosmos/src/ChangeFeed/ChangeFeedCrossFeedRangeAsyncEnumerator.cs @@ -5,14 +5,15 @@ namespace Microsoft.Azure.Cosmos.ChangeFeed { using System; - using System.Collections.Generic; + using System.Threading; using System.Threading.Tasks; using Microsoft.Azure.Cosmos.ChangeFeed.Pagination; using Microsoft.Azure.Cosmos.Pagination; using Microsoft.Azure.Cosmos.Query.Core.Monads; - using Microsoft.Azure.Cosmos.Serializer; - - internal sealed class ChangeFeedCrossFeedRangeAsyncEnumerator : IAsyncEnumerator> + using Microsoft.Azure.Cosmos.Serializer; + using Microsoft.Azure.Cosmos.Tracing; + + internal sealed class ChangeFeedCrossFeedRangeAsyncEnumerator : ITracingAsyncEnumerator> { private readonly CrossPartitionChangeFeedAsyncEnumerator enumerator; private readonly JsonSerializationFormatOptions jsonSerializationFormatOptions; @@ -29,9 +30,9 @@ public ChangeFeedCrossFeedRangeAsyncEnumerator( public ValueTask DisposeAsync() => this.enumerator.DisposeAsync(); - public async ValueTask MoveNextAsync() + public async ValueTask MoveNextAsync(ITrace trace, CancellationToken cancellationToken) { - if (!await this.enumerator.MoveNextAsync()) + if (!await this.enumerator.MoveNextAsync(trace, cancellationToken)) { throw new InvalidOperationException("Change Feed should always be able to move next."); } diff --git a/Microsoft.Azure.Cosmos/src/ChangeFeed/ChangeFeedIteratorCore.cs b/Microsoft.Azure.Cosmos/src/ChangeFeed/ChangeFeedIteratorCore.cs index 87b34cca17..bafe2c3b89 100644 --- a/Microsoft.Azure.Cosmos/src/ChangeFeed/ChangeFeedIteratorCore.cs +++ b/Microsoft.Azure.Cosmos/src/ChangeFeed/ChangeFeedIteratorCore.cs @@ -208,8 +208,7 @@ public ChangeFeedIteratorCore( 
changeFeedRequestOptions?.PageSizeHint, changeFeedRequestOptions?.JsonSerializationFormatOptions?.JsonSerializationFormat, additionalHeaders, - this.changeFeedQuerySpec), - cancellationToken: default); + this.changeFeedQuerySpec)); TryCatch monadicEnumerator = TryCatch.FromResult(enumerator); return monadicEnumerator; @@ -274,11 +273,10 @@ private async Task ReadNextInternalAsync(ITrace trace, Cancella } CrossPartitionChangeFeedAsyncEnumerator enumerator = monadicEnumerator.Result; - enumerator.SetCancellationToken(cancellationToken); try { - if (!await enumerator.MoveNextAsync(trace)) + if (!await enumerator.MoveNextAsync(trace, cancellationToken)) { throw new InvalidOperationException("ChangeFeed enumerator should always have a next continuation"); } diff --git a/Microsoft.Azure.Cosmos/src/ChangeFeed/Pagination/ChangeFeedNotModifiedPage.cs b/Microsoft.Azure.Cosmos/src/ChangeFeed/Pagination/ChangeFeedNotModifiedPage.cs index 7c1b09bf59..814d1b583e 100644 --- a/Microsoft.Azure.Cosmos/src/ChangeFeed/Pagination/ChangeFeedNotModifiedPage.cs +++ b/Microsoft.Azure.Cosmos/src/ChangeFeed/Pagination/ChangeFeedNotModifiedPage.cs @@ -12,14 +12,16 @@ internal sealed class ChangeFeedNotModifiedPage : ChangeFeedPage private static readonly ImmutableHashSet bannedHeaders = new HashSet().ToImmutableHashSet(); public ChangeFeedNotModifiedPage( - double requestCharge, + double requestCharge, string activityId, IReadOnlyDictionary additionalHeaders, ChangeFeedState state) : base(requestCharge, activityId, additionalHeaders, state) { - } - + } + + public override int ItemCount => 0; + protected override ImmutableHashSet DerivedClassBannedHeaders => bannedHeaders; } } diff --git a/Microsoft.Azure.Cosmos/src/ChangeFeed/Pagination/ChangeFeedPartitionRangePageAsyncEnumerator.cs b/Microsoft.Azure.Cosmos/src/ChangeFeed/Pagination/ChangeFeedPartitionRangePageAsyncEnumerator.cs index 4c0b6e169d..15228e92e9 100644 --- a/Microsoft.Azure.Cosmos/src/ChangeFeed/Pagination/ChangeFeedPartitionRangePageAsyncEnumerator.cs +++ b/Microsoft.Azure.Cosmos/src/ChangeFeed/Pagination/ChangeFeedPartitionRangePageAsyncEnumerator.cs @@ -19,9 +19,8 @@ internal sealed class ChangeFeedPartitionRangePageAsyncEnumerator : PartitionRan public ChangeFeedPartitionRangePageAsyncEnumerator( IChangeFeedDataSource changeFeedDataSource, FeedRangeState feedRangeState, - ChangeFeedPaginationOptions changeFeedPaginationOptions, - CancellationToken cancellationToken) - : base(feedRangeState, cancellationToken) + ChangeFeedPaginationOptions changeFeedPaginationOptions) + : base(feedRangeState) { this.changeFeedDataSource = changeFeedDataSource ?? throw new ArgumentNullException(nameof(changeFeedDataSource)); this.changeFeedPaginationOptions = changeFeedPaginationOptions ?? throw new ArgumentNullException(nameof(changeFeedPaginationOptions)); diff --git a/Microsoft.Azure.Cosmos/src/ChangeFeed/Pagination/ChangeFeedSuccessPage.cs b/Microsoft.Azure.Cosmos/src/ChangeFeed/Pagination/ChangeFeedSuccessPage.cs index caf9d3276f..8499a9f7ca 100644 --- a/Microsoft.Azure.Cosmos/src/ChangeFeed/Pagination/ChangeFeedSuccessPage.cs +++ b/Microsoft.Azure.Cosmos/src/ChangeFeed/Pagination/ChangeFeedSuccessPage.cs @@ -15,17 +15,21 @@ internal sealed class ChangeFeedSuccessPage : ChangeFeedPage public ChangeFeedSuccessPage( Stream content, - double requestCharge, + double requestCharge, + int itemCount, string activityId, IReadOnlyDictionary additionalHeaders, ChangeFeedState state) : base(requestCharge, activityId, additionalHeaders, state) { - this.Content = content ?? 
throw new ArgumentNullException(nameof(content)); + this.Content = content ?? throw new ArgumentNullException(nameof(content)); + this.ItemCount = itemCount; } - public Stream Content { get; } - + public Stream Content { get; } + + public override int ItemCount { get; } + protected override ImmutableHashSet DerivedClassBannedHeaders => bannedHeaders; } } diff --git a/Microsoft.Azure.Cosmos/src/ChangeFeed/Pagination/CrossPartitionChangeFeedAsyncEnumerator.cs b/Microsoft.Azure.Cosmos/src/ChangeFeed/Pagination/CrossPartitionChangeFeedAsyncEnumerator.cs index a359a9baaf..0ecb494546 100644 --- a/Microsoft.Azure.Cosmos/src/ChangeFeed/Pagination/CrossPartitionChangeFeedAsyncEnumerator.cs +++ b/Microsoft.Azure.Cosmos/src/ChangeFeed/Pagination/CrossPartitionChangeFeedAsyncEnumerator.cs @@ -12,32 +12,24 @@ namespace Microsoft.Azure.Cosmos.ChangeFeed.Pagination using Microsoft.Azure.Cosmos.Query.Core.Monads; using Microsoft.Azure.Cosmos.Tracing; - internal sealed class CrossPartitionChangeFeedAsyncEnumerator : IAsyncEnumerator>> + internal sealed class CrossPartitionChangeFeedAsyncEnumerator : ITracingAsyncEnumerator>> { private readonly CrossPartitionRangePageAsyncEnumerator crossPartitionEnumerator; - private CancellationToken cancellationToken; private TryCatch>? bufferedException; private CrossPartitionChangeFeedAsyncEnumerator( - CrossPartitionRangePageAsyncEnumerator crossPartitionEnumerator, - CancellationToken cancellationToken) + CrossPartitionRangePageAsyncEnumerator crossPartitionEnumerator) { this.crossPartitionEnumerator = crossPartitionEnumerator ?? throw new ArgumentNullException(nameof(crossPartitionEnumerator)); - this.cancellationToken = cancellationToken; } public TryCatch> Current { get; private set; } public ValueTask DisposeAsync() => this.crossPartitionEnumerator.DisposeAsync(); - public ValueTask MoveNextAsync() - { - return this.MoveNextAsync(NoOpTrace.Singleton); - } - - public async ValueTask MoveNextAsync(ITrace trace) + public async ValueTask MoveNextAsync(ITrace trace, CancellationToken cancellationToken) { - this.cancellationToken.ThrowIfCancellationRequested(); + cancellationToken.ThrowIfCancellationRequested(); if (trace == null) { @@ -53,7 +45,7 @@ public async ValueTask MoveNextAsync(ITrace trace) return true; } - if (!await this.crossPartitionEnumerator.MoveNextAsync(changeFeedMoveNextTrace)) + if (!await this.crossPartitionEnumerator.MoveNextAsync(changeFeedMoveNextTrace, cancellationToken)) { throw new InvalidOperationException("ChangeFeed should always have a next page."); } @@ -80,7 +72,7 @@ public async ValueTask MoveNextAsync(ITrace trace) double totalRequestCharge = backendPage.RequestCharge; do { - if (!await this.crossPartitionEnumerator.MoveNextAsync(drainNotModifedPages)) + if (!await this.crossPartitionEnumerator.MoveNextAsync(drainNotModifedPages, cancellationToken)) { throw new InvalidOperationException("ChangeFeed should always have a next page."); } @@ -107,7 +99,8 @@ public async ValueTask MoveNextAsync(ITrace trace) { backendPage = new ChangeFeedSuccessPage( changeFeedSuccessPage.Content, - totalRequestCharge, + totalRequestCharge, + changeFeedSuccessPage.ItemCount, changeFeedSuccessPage.ActivityId, changeFeedSuccessPage.AdditionalHeaders, changeFeedSuccessPage.State); @@ -115,7 +108,7 @@ public async ValueTask MoveNextAsync(ITrace trace) else { backendPage = new ChangeFeedNotModifiedPage( - totalRequestCharge, + totalRequestCharge, backendPage.ActivityId, backendPage.AdditionalHeaders, backendPage.State); @@ -133,17 +126,10 @@ public async 
ValueTask MoveNextAsync(ITrace trace) } } - public void SetCancellationToken(CancellationToken cancellationToken) - { - this.cancellationToken = cancellationToken; - this.crossPartitionEnumerator.SetCancellationToken(cancellationToken); - } - public static CrossPartitionChangeFeedAsyncEnumerator Create( IDocumentContainer documentContainer, CrossFeedRangeState state, - ChangeFeedPaginationOptions changeFeedPaginationOptions, - CancellationToken cancellationToken) + ChangeFeedPaginationOptions changeFeedPaginationOptions) { changeFeedPaginationOptions ??= ChangeFeedPaginationOptions.Default; @@ -156,17 +142,14 @@ public static CrossPartitionChangeFeedAsyncEnumerator Create( documentContainer, CrossPartitionChangeFeedAsyncEnumerator.MakeCreateFunction( documentContainer, - changeFeedPaginationOptions, - cancellationToken), + changeFeedPaginationOptions), comparer: default /* this uses a regular queue instead of priority queue */, maxConcurrency: default, prefetchPolicy: PrefetchPolicy.PrefetchSinglePage, - cancellationToken: cancellationToken, state: state); CrossPartitionChangeFeedAsyncEnumerator enumerator = new CrossPartitionChangeFeedAsyncEnumerator( - crossPartitionEnumerator, - cancellationToken); + crossPartitionEnumerator); return enumerator; } @@ -181,11 +164,9 @@ private static bool IsNextRangeEqualToOriginal( private static CreatePartitionRangePageAsyncEnumerator MakeCreateFunction( IChangeFeedDataSource changeFeedDataSource, - ChangeFeedPaginationOptions changeFeedPaginationOptions, - CancellationToken cancellationToken) => (FeedRangeState feedRangeState) => new ChangeFeedPartitionRangePageAsyncEnumerator( + ChangeFeedPaginationOptions changeFeedPaginationOptions) => (FeedRangeState feedRangeState) => new ChangeFeedPartitionRangePageAsyncEnumerator( changeFeedDataSource, feedRangeState, - changeFeedPaginationOptions, - cancellationToken); + changeFeedPaginationOptions); } } diff --git a/Microsoft.Azure.Cosmos/src/Pagination/BufferedPartitionRangePageAsyncEnumerator.cs b/Microsoft.Azure.Cosmos/src/Pagination/BufferedPartitionRangePageAsyncEnumerator.cs index de689618f6..d79bd2bf7c 100644 --- a/Microsoft.Azure.Cosmos/src/Pagination/BufferedPartitionRangePageAsyncEnumerator.cs +++ b/Microsoft.Azure.Cosmos/src/Pagination/BufferedPartitionRangePageAsyncEnumerator.cs @@ -15,10 +15,27 @@ internal sealed class BufferedPartitionRangePageAsyncEnumerator : where TState : State { private readonly PartitionRangePageAsyncEnumerator enumerator; - private TryCatch? bufferedPage; + private TryCatch? bufferedPage; + + public override Exception BufferedException + { + get + { + if (this.bufferedPage.HasValue && this.bufferedPage.Value.Failed) + { + return this.bufferedPage.Value.Exception; + } + + return null; + } + } + + public override int BufferedItemCount => this.bufferedPage.HasValue && this.bufferedPage.Value.Succeeded ? + this.bufferedPage.Value.Result.ItemCount : + 0; - public BufferedPartitionRangePageAsyncEnumerator(PartitionRangePageAsyncEnumerator enumerator, CancellationToken cancellationToken) - : base(enumerator.FeedRangeState, cancellationToken) + public BufferedPartitionRangePageAsyncEnumerator(PartitionRangePageAsyncEnumerator enumerator) + : base(enumerator.FeedRangeState) { this.enumerator = enumerator ?? 
throw new ArgumentNullException(nameof(enumerator)); } @@ -59,15 +76,9 @@ public override async ValueTask PrefetchAsync(ITrace trace, CancellationToken ca using (ITrace prefetchTrace = trace.StartChild("Prefetch", TraceComponent.Pagination, TraceLevel.Info)) { - await this.enumerator.MoveNextAsync(prefetchTrace); + await this.enumerator.MoveNextAsync(prefetchTrace, cancellationToken); this.bufferedPage = this.enumerator.Current; } } - - public override void SetCancellationToken(CancellationToken cancellationToken) - { - base.SetCancellationToken(cancellationToken); - this.enumerator.SetCancellationToken(cancellationToken); - } } } diff --git a/Microsoft.Azure.Cosmos/src/Pagination/BufferedPartitionRangePageAsyncEnumeratorBase.cs b/Microsoft.Azure.Cosmos/src/Pagination/BufferedPartitionRangePageAsyncEnumeratorBase.cs index 67af30c353..d54d3c1c64 100644 --- a/Microsoft.Azure.Cosmos/src/Pagination/BufferedPartitionRangePageAsyncEnumeratorBase.cs +++ b/Microsoft.Azure.Cosmos/src/Pagination/BufferedPartitionRangePageAsyncEnumeratorBase.cs @@ -3,7 +3,8 @@ // ------------------------------------------------------------ namespace Microsoft.Azure.Cosmos.Pagination -{ +{ + using System; using System.Threading; using System.Threading.Tasks; using Microsoft.Azure.Cosmos.Tracing; @@ -12,10 +13,14 @@ internal abstract class BufferedPartitionRangePageAsyncEnumeratorBase where TState : State { - protected BufferedPartitionRangePageAsyncEnumeratorBase(FeedRangeState feedRangeState, CancellationToken cancellationToken) - : base(feedRangeState, cancellationToken) + protected BufferedPartitionRangePageAsyncEnumeratorBase(FeedRangeState feedRangeState) + : base(feedRangeState) { - } + } + + public abstract Exception BufferedException { get; } + + public abstract int BufferedItemCount { get; } public abstract ValueTask PrefetchAsync(ITrace trace, CancellationToken cancellationToken); } diff --git a/Microsoft.Azure.Cosmos/src/Pagination/CrossFeedRangePage.cs b/Microsoft.Azure.Cosmos/src/Pagination/CrossFeedRangePage.cs index 578e6b6459..675620885b 100644 --- a/Microsoft.Azure.Cosmos/src/Pagination/CrossFeedRangePage.cs +++ b/Microsoft.Azure.Cosmos/src/Pagination/CrossFeedRangePage.cs @@ -19,8 +19,10 @@ public CrossFeedRangePage(TBackendPage backendEndPage, CrossFeedRangeState this.Page.ItemCount; + protected override ImmutableHashSet DerivedClassBannedHeaders => bannedHeaders; } } diff --git a/Microsoft.Azure.Cosmos/src/Pagination/CrossPartitionRangePageAsyncEnumerator.cs b/Microsoft.Azure.Cosmos/src/Pagination/CrossPartitionRangePageAsyncEnumerator.cs index a56a21d593..4feb66363e 100644 --- a/Microsoft.Azure.Cosmos/src/Pagination/CrossPartitionRangePageAsyncEnumerator.cs +++ b/Microsoft.Azure.Cosmos/src/Pagination/CrossPartitionRangePageAsyncEnumerator.cs @@ -26,7 +26,6 @@ internal sealed class CrossPartitionRangePageAsyncEnumerator : IT private readonly IFeedRangeProvider feedRangeProvider; private readonly CreatePartitionRangePageAsyncEnumerator createPartitionRangeEnumerator; private readonly AsyncLazy>> lazyEnumerators; - private CancellationToken cancellationToken; private FeedRangeState? nextState; public CrossPartitionRangePageAsyncEnumerator( @@ -35,12 +34,10 @@ public CrossPartitionRangePageAsyncEnumerator( IComparer> comparer, int? maxConcurrency, PrefetchPolicy prefetchPolicy, - CancellationToken cancellationToken, CrossFeedRangeState state = default) { this.feedRangeProvider = feedRangeProvider ?? 
throw new ArgumentNullException(nameof(feedRangeProvider)); this.createPartitionRangeEnumerator = createPartitionRangeEnumerator ?? throw new ArgumentNullException(nameof(createPartitionRangeEnumerator)); - this.cancellationToken = cancellationToken; this.lazyEnumerators = new AsyncLazy>>((ITrace trace, CancellationToken token) => InitializeEnumeratorsAsync( @@ -58,7 +55,7 @@ public CrossPartitionRangePageAsyncEnumerator( public FeedRangeInternal CurrentRange { get; private set; } - public async ValueTask MoveNextAsync(ITrace trace) + public async ValueTask MoveNextAsync(ITrace trace, CancellationToken cancellationToken) { if (trace == null) { @@ -69,7 +66,7 @@ public async ValueTask MoveNextAsync(ITrace trace) { IQueue> enumerators = await this.lazyEnumerators.GetValueAsync( childTrace, - cancellationToken: this.cancellationToken); + cancellationToken); if (enumerators.Count == 0) { this.Current = default; @@ -79,11 +76,10 @@ public async ValueTask MoveNextAsync(ITrace trace) } PartitionRangePageAsyncEnumerator currentPaginator = enumerators.Dequeue(); - currentPaginator.SetCancellationToken(this.cancellationToken); bool moveNextResult = false; try { - moveNextResult = await currentPaginator.MoveNextAsync(childTrace); + moveNextResult = await currentPaginator.MoveNextAsync(childTrace, cancellationToken); } catch { @@ -96,7 +92,7 @@ public async ValueTask MoveNextAsync(ITrace trace) { // Current enumerator is empty, // so recursively retry on the next enumerator. - return await this.MoveNextAsync(childTrace); + return await this.MoveNextAsync(childTrace, cancellationToken); } if (currentPaginator.Current.Failed) @@ -114,18 +110,18 @@ public async ValueTask MoveNextAsync(ITrace trace) List childRanges = await this.feedRangeProvider.GetChildRangeAsync( currentPaginator.FeedRangeState.FeedRange, childTrace, - this.cancellationToken); + cancellationToken); if (childRanges.Count <= 1) { // We optimistically assumed that the cache is not stale. // In the event that it is (where we only get back one child / the partition that we think got split) // Then we need to refresh the cache - await this.feedRangeProvider.RefreshProviderAsync(childTrace, this.cancellationToken); + await this.feedRangeProvider.RefreshProviderAsync(childTrace, cancellationToken); childRanges = await this.feedRangeProvider.GetChildRangeAsync( currentPaginator.FeedRangeState.FeedRange, childTrace, - this.cancellationToken); + cancellationToken); } if (childRanges.Count < 1) @@ -157,7 +153,7 @@ public async ValueTask MoveNextAsync(ITrace trace) } // Recursively retry - return await this.MoveNextAsync(childTrace); + return await this.MoveNextAsync(childTrace, cancellationToken); } // Just enqueue the paginator and the user can decide if they want to retry. 
@@ -218,11 +214,6 @@ public bool TryPeekNext(out FeedRangeState nextState) return false; } - public void SetCancellationToken(CancellationToken cancellationToken) - { - this.cancellationToken = cancellationToken; - } - private static bool IsSplitException(Exception exeception) { return exeception is CosmosException cosmosException @@ -304,8 +295,8 @@ private static IReadOnlyList enumerator = createPartitionRangeEnumerator(feedRangeState); BufferedPartitionRangePageAsyncEnumeratorBase bufferedEnumerator = policy switch { - PrefetchPolicy.PrefetchSinglePage => new BufferedPartitionRangePageAsyncEnumerator(enumerator, cancellationToken), - PrefetchPolicy.PrefetchAll => new FullyBufferedPartitionRangeAsyncEnumerator(enumerator, cancellationToken), + PrefetchPolicy.PrefetchSinglePage => new BufferedPartitionRangePageAsyncEnumerator(enumerator), + PrefetchPolicy.PrefetchAll => new FullyBufferedPartitionRangeAsyncEnumerator(enumerator), _ => throw new ArgumentOutOfRangeException(nameof(policy)), }; bufferedEnumerators.Add(bufferedEnumerator); diff --git a/Microsoft.Azure.Cosmos/src/Pagination/FullyBufferedPartitionRangeAsyncEnumerator.cs b/Microsoft.Azure.Cosmos/src/Pagination/FullyBufferedPartitionRangeAsyncEnumerator.cs index d9da53e15c..d029fa667e 100644 --- a/Microsoft.Azure.Cosmos/src/Pagination/FullyBufferedPartitionRangeAsyncEnumerator.cs +++ b/Microsoft.Azure.Cosmos/src/Pagination/FullyBufferedPartitionRangeAsyncEnumerator.cs @@ -16,16 +16,31 @@ internal sealed class FullyBufferedPartitionRangeAsyncEnumerator { private readonly PartitionRangePageAsyncEnumerator enumerator; private readonly List bufferedPages; - private int currentIndex; + private int currentIndex; + private int bufferedItemCount; private Exception exception; - private bool HasPrefetched => (this.exception != null) || (this.bufferedPages.Count > 0); + private bool hasPrefetched; + + public override Exception BufferedException => this.exception; + + public override int BufferedItemCount => this.bufferedItemCount; - public FullyBufferedPartitionRangeAsyncEnumerator(PartitionRangePageAsyncEnumerator enumerator, CancellationToken cancellationToken) - : base(enumerator.FeedRangeState, cancellationToken) + public FullyBufferedPartitionRangeAsyncEnumerator(PartitionRangePageAsyncEnumerator enumerator) + : this(enumerator, null) + { + } + + public FullyBufferedPartitionRangeAsyncEnumerator(PartitionRangePageAsyncEnumerator enumerator, IReadOnlyList bufferedPages) + : base(enumerator.FeedRangeState) { this.enumerator = enumerator ?? 
throw new ArgumentNullException(nameof(enumerator)); - this.bufferedPages = new List(); + this.bufferedPages = new List(); + + if (bufferedPages != null) + { + this.bufferedPages.AddRange(bufferedPages); + } } public override ValueTask DisposeAsync() @@ -40,7 +55,7 @@ public override async ValueTask PrefetchAsync(ITrace trace, CancellationToken ca throw new ArgumentNullException(nameof(trace)); } - if (this.HasPrefetched) + if (this.hasPrefetched) { return; } @@ -49,13 +64,14 @@ public override async ValueTask PrefetchAsync(ITrace trace, CancellationToken ca using (ITrace prefetchTrace = trace.StartChild("Prefetch", TraceComponent.Pagination, TraceLevel.Info)) { - while (await this.enumerator.MoveNextAsync(prefetchTrace)) + while (await this.enumerator.MoveNextAsync(prefetchTrace, cancellationToken)) { cancellationToken.ThrowIfCancellationRequested(); TryCatch current = this.enumerator.Current; if (current.Succeeded) { - this.bufferedPages.Add(current.Result); + this.bufferedPages.Add(current.Result); + this.bufferedItemCount += current.Result.ItemCount; } else { @@ -63,7 +79,9 @@ public override async ValueTask PrefetchAsync(ITrace trace, CancellationToken ca break; } } - } + } + + this.hasPrefetched = true; } protected override async Task> GetNextPageAsync(ITrace trace, CancellationToken cancellationToken) @@ -79,18 +97,12 @@ protected override async Task> GetNextPageAsync(ITrace trace, Ca } else { - await this.enumerator.MoveNextAsync(trace); + await this.enumerator.MoveNextAsync(trace, cancellationToken); result = this.enumerator.Current; } ++this.currentIndex; return result; } - - public override void SetCancellationToken(CancellationToken cancellationToken) - { - base.SetCancellationToken(cancellationToken); - this.enumerator.SetCancellationToken(cancellationToken); - } } } diff --git a/Microsoft.Azure.Cosmos/src/Pagination/ITracingAsyncEnumerator.cs b/Microsoft.Azure.Cosmos/src/Pagination/ITracingAsyncEnumerator.cs index a4c232af48..d9c656a2e2 100644 --- a/Microsoft.Azure.Cosmos/src/Pagination/ITracingAsyncEnumerator.cs +++ b/Microsoft.Azure.Cosmos/src/Pagination/ITracingAsyncEnumerator.cs @@ -4,7 +4,8 @@ namespace Microsoft.Azure.Cosmos.Pagination { - using System; + using System; + using System.Threading; using System.Threading.Tasks; using Microsoft.Azure.Cosmos.Tracing; @@ -12,6 +13,6 @@ internal interface ITracingAsyncEnumerator : IAsyncDisposable { T Current { get; } - ValueTask MoveNextAsync(ITrace trace); + ValueTask MoveNextAsync(ITrace trace, CancellationToken cancellationToken); } } \ No newline at end of file diff --git a/Microsoft.Azure.Cosmos/src/Pagination/NetworkAttachedDocumentContainer.cs b/Microsoft.Azure.Cosmos/src/Pagination/NetworkAttachedDocumentContainer.cs index 8420cb0540..eb87821663 100644 --- a/Microsoft.Azure.Cosmos/src/Pagination/NetworkAttachedDocumentContainer.cs +++ b/Microsoft.Azure.Cosmos/src/Pagination/NetworkAttachedDocumentContainer.cs @@ -206,7 +206,8 @@ public async Task> MonadicReadFeedAsync( if (responseMessage.StatusCode == HttpStatusCode.OK) { double requestCharge = responseMessage.Headers.RequestCharge; - string activityId = responseMessage.Headers.ActivityId; + string activityId = responseMessage.Headers.ActivityId; + int itemCount = int.Parse(responseMessage.Headers.ItemCount); ReadFeedState state = responseMessage.Headers.ContinuationToken != null ? 
ReadFeedState.Continuation(CosmosString.Create(responseMessage.Headers.ContinuationToken)) : null; Dictionary additionalHeaders = GetAdditionalHeaders( responseMessage.Headers.CosmosMessageHeaders, @@ -214,7 +215,8 @@ public async Task> MonadicReadFeedAsync( ReadFeedPage readFeedPage = new ReadFeedPage( responseMessage.Content, - requestCharge, + requestCharge, + itemCount, activityId, additionalHeaders, state); @@ -327,7 +329,8 @@ public async Task> MonadicChangeFeedAsync( if (pageHasResult) { double requestCharge = responseMessage.Headers.RequestCharge; - string activityId = responseMessage.Headers.ActivityId; + string activityId = responseMessage.Headers.ActivityId; + int itemCount = int.Parse(responseMessage.Headers.ItemCount); ChangeFeedState state = ChangeFeedState.Continuation(CosmosString.Create(responseMessage.Headers.ETag)); Dictionary additionalHeaders = GetAdditionalHeaders( responseMessage.Headers.CosmosMessageHeaders, @@ -338,7 +341,8 @@ public async Task> MonadicChangeFeedAsync( { changeFeedPage = new ChangeFeedSuccessPage( responseMessage.Content, - requestCharge, + requestCharge, + itemCount, activityId, additionalHeaders, state); diff --git a/Microsoft.Azure.Cosmos/src/Pagination/Page.cs b/Microsoft.Azure.Cosmos/src/Pagination/Page.cs index ee0db8af85..f3ed29ebf4 100644 --- a/Microsoft.Azure.Cosmos/src/Pagination/Page.cs +++ b/Microsoft.Azure.Cosmos/src/Pagination/Page.cs @@ -17,15 +17,15 @@ internal abstract class Page Microsoft.Azure.Documents.HttpConstants.HttpHeaders.ActivityId, }.ToImmutableHashSet(); - private static readonly IReadOnlyDictionary EmptyDictionary = new Dictionary(); + private static readonly IReadOnlyDictionary EmptyDictionary = new Dictionary().ToImmutableDictionary(); protected Page( - double requestCharge, + double requestCharge, string activityId, IReadOnlyDictionary additionalHeaders, TState state) { - this.RequestCharge = requestCharge < 0 ? throw new ArgumentOutOfRangeException(nameof(requestCharge)) : requestCharge; + this.RequestCharge = requestCharge < 0 ? throw new ArgumentOutOfRangeException(nameof(requestCharge)) : requestCharge; this.ActivityId = activityId; this.State = state; @@ -47,7 +47,9 @@ protected Page( this.AdditionalHeaders = additionalHeaders ?? EmptyDictionary; } - public double RequestCharge { get; } + public double RequestCharge { get; } + + public abstract int ItemCount { get; } public string ActivityId { get; } diff --git a/Microsoft.Azure.Cosmos/src/Pagination/PaginationOptions.cs b/Microsoft.Azure.Cosmos/src/Pagination/PaginationOptions.cs index 593d761ec0..ac50876ab8 100644 --- a/Microsoft.Azure.Cosmos/src/Pagination/PaginationOptions.cs +++ b/Microsoft.Azure.Cosmos/src/Pagination/PaginationOptions.cs @@ -7,7 +7,6 @@ namespace Microsoft.Azure.Cosmos.Pagination using System; using System.Collections.Generic; using System.Collections.Immutable; - using Microsoft.Azure.Cosmos.Json; using Microsoft.Azure.Documents; internal abstract class PaginationOptions @@ -23,10 +22,10 @@ internal abstract class PaginationOptions protected PaginationOptions( int? pageSizeLimit = null, - Dictionary additionalHeaders = null) + IReadOnlyDictionary additionalHeaders = null) { this.PageSizeLimit = pageSizeLimit; - this.AdditionalHeaders = additionalHeaders != null ? additionalHeaders.ToImmutableDictionary() : EmptyDictionary; + this.AdditionalHeaders = additionalHeaders ?? EmptyDictionary; foreach (string key in this.AdditionalHeaders.Keys) { @@ -39,7 +38,7 @@ protected PaginationOptions( public int? 
PageSizeLimit { get; } - public ImmutableDictionary AdditionalHeaders { get; } + public IReadOnlyDictionary AdditionalHeaders { get; } protected abstract ImmutableHashSet BannedAdditionalHeaders { get; } } diff --git a/Microsoft.Azure.Cosmos/src/Pagination/PartitionRangePageAsyncEnumerator.cs b/Microsoft.Azure.Cosmos/src/Pagination/PartitionRangePageAsyncEnumerator.cs index ac9b56d5ee..1dc017527f 100644 --- a/Microsoft.Azure.Cosmos/src/Pagination/PartitionRangePageAsyncEnumerator.cs +++ b/Microsoft.Azure.Cosmos/src/Pagination/PartitionRangePageAsyncEnumerator.cs @@ -18,12 +18,9 @@ internal abstract class PartitionRangePageAsyncEnumerator : ITrac where TPage : Page where TState : State { - private CancellationToken cancellationToken; - - protected PartitionRangePageAsyncEnumerator(FeedRangeState feedRangeState, CancellationToken cancellationToken) + protected PartitionRangePageAsyncEnumerator(FeedRangeState feedRangeState) { this.FeedRangeState = feedRangeState; - this.cancellationToken = cancellationToken; } public FeedRangeState FeedRangeState { get; private set; } @@ -34,7 +31,7 @@ protected PartitionRangePageAsyncEnumerator(FeedRangeState feedRangeStat private bool HasMoreResults => !this.HasStarted || (this.FeedRangeState.State != default); - public async ValueTask MoveNextAsync(ITrace trace) + public async ValueTask MoveNextAsync(ITrace trace, CancellationToken cancellationToken) { if (trace == null) { @@ -48,7 +45,7 @@ public async ValueTask MoveNextAsync(ITrace trace) return false; } - this.Current = await this.GetNextPageAsync(trace: childTrace, cancellationToken: this.cancellationToken); + this.Current = await this.GetNextPageAsync(trace: childTrace, cancellationToken); if (this.Current.Succeeded) { this.FeedRangeState = new FeedRangeState(this.FeedRangeState.FeedRange, this.Current.Result.State); @@ -62,10 +59,5 @@ public async ValueTask MoveNextAsync(ITrace trace) protected abstract Task> GetNextPageAsync(ITrace trace, CancellationToken cancellationToken); public abstract ValueTask DisposeAsync(); - - public virtual void SetCancellationToken(CancellationToken cancellationToken) - { - this.cancellationToken = cancellationToken; - } } } diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/TracingAsyncEnumerator.cs b/Microsoft.Azure.Cosmos/src/Pagination/TracingAsyncEnumerator.cs similarity index 75% rename from Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/TracingAsyncEnumerator.cs rename to Microsoft.Azure.Cosmos/src/Pagination/TracingAsyncEnumerator.cs index e37b87a978..b4af0ea01f 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/TracingAsyncEnumerator.cs +++ b/Microsoft.Azure.Cosmos/src/Pagination/TracingAsyncEnumerator.cs @@ -2,10 +2,11 @@ // Copyright (c) Microsoft Corporation. All rights reserved. 
// ------------------------------------------------------------ -namespace Microsoft.Azure.Cosmos.Tests.Query.Pipeline +namespace Microsoft.Azure.Cosmos.Pagination { using System; - using System.Collections.Generic; + using System.Collections.Generic; + using System.Threading; using System.Threading.Tasks; using Microsoft.Azure.Cosmos.Pagination; using Microsoft.Azure.Cosmos.Tracing; @@ -13,12 +14,14 @@ namespace Microsoft.Azure.Cosmos.Tests.Query.Pipeline internal sealed class TracingAsyncEnumerator : IAsyncEnumerator { private readonly ITracingAsyncEnumerator enumerator; - private readonly ITrace trace; + private readonly ITrace trace; + private readonly CancellationToken cancellationToken; - public TracingAsyncEnumerator(ITracingAsyncEnumerator enumerator, ITrace trace) + public TracingAsyncEnumerator(ITracingAsyncEnumerator enumerator, ITrace trace, CancellationToken cancellationToken) { this.enumerator = enumerator ?? throw new ArgumentNullException(nameof(enumerator)); - this.trace = trace ?? throw new ArgumentNullException(nameof(trace)); + this.trace = trace ?? throw new ArgumentNullException(nameof(trace)); + this.cancellationToken = cancellationToken; } public T Current => this.enumerator.Current; @@ -30,7 +33,7 @@ public ValueTask DisposeAsync() public ValueTask MoveNextAsync() { - return this.enumerator.MoveNextAsync(this.trace); + return this.enumerator.MoveNextAsync(this.trace, this.cancellationToken); } } } \ No newline at end of file diff --git a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/Aggregate/AggregateQueryPipelineStage.Client.cs b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/Aggregate/AggregateQueryPipelineStage.Client.cs index c1fff1ac59..7966918fcc 100644 --- a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/Aggregate/AggregateQueryPipelineStage.Client.cs +++ b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/Aggregate/AggregateQueryPipelineStage.Client.cs @@ -13,7 +13,7 @@ namespace Microsoft.Azure.Cosmos.Query.Core.Pipeline.Aggregate using Microsoft.Azure.Cosmos.Query.Core.Pipeline.Aggregate.Aggregators; using Microsoft.Azure.Cosmos.Query.Core.Pipeline.Pagination; using Microsoft.Azure.Cosmos.Tracing; - using Microsoft.Azure.Documents; + using static IndexUtilizationHelper; internal abstract partial class AggregateQueryPipelineStage : QueryPipelineStageBase { @@ -22,9 +22,8 @@ private sealed class ClientAggregateQueryPipelineStage : AggregateQueryPipelineS private ClientAggregateQueryPipelineStage( IQueryPipelineStage source, SingleGroupAggregator singleGroupAggregator, - bool isValueAggregateQuery, - CancellationToken cancellationToken) - : base(source, singleGroupAggregator, isValueAggregateQuery, cancellationToken) + bool isValueAggregateQuery) + : base(source, singleGroupAggregator, isValueAggregateQuery) { // all the work is done in the base constructor. 
} @@ -35,7 +34,6 @@ public static TryCatch MonadicCreate( IReadOnlyList orderedAliases, bool hasSelectValue, CosmosElement continuationToken, - CancellationToken cancellationToken, MonadicCreatePipelineStage monadicCreatePipelineStage) { if (monadicCreatePipelineStage == null) @@ -54,7 +52,7 @@ public static TryCatch MonadicCreate( return TryCatch.FromException(tryCreateSingleGroupAggregator.Exception); } - TryCatch tryCreateSource = monadicCreatePipelineStage(continuationToken, cancellationToken); + TryCatch tryCreateSource = monadicCreatePipelineStage(continuationToken); if (tryCreateSource.Failed) { return tryCreateSource; @@ -63,15 +61,14 @@ public static TryCatch MonadicCreate( ClientAggregateQueryPipelineStage stage = new ClientAggregateQueryPipelineStage( tryCreateSource.Result, tryCreateSingleGroupAggregator.Result, - hasSelectValue, - cancellationToken); + hasSelectValue); return TryCatch.FromResult(stage); } - public override async ValueTask MoveNextAsync(ITrace trace) + public override async ValueTask MoveNextAsync(ITrace trace, CancellationToken cancellationToken) { - this.cancellationToken.ThrowIfCancellationRequested(); + cancellationToken.ThrowIfCancellationRequested(); if (trace == null) { @@ -88,10 +85,9 @@ public override async ValueTask MoveNextAsync(ITrace trace) // but then we will have to design a continuation token. double requestCharge = 0; - long responseLengthBytes = 0; IReadOnlyDictionary cumulativeAdditionalHeaders = default; - while (await this.inputStage.MoveNextAsync(trace)) + while (await this.inputStage.MoveNextAsync(trace, cancellationToken)) { TryCatch tryGetPageFromSource = this.inputStage.Current; if (tryGetPageFromSource.Failed) @@ -103,19 +99,14 @@ public override async ValueTask MoveNextAsync(ITrace trace) QueryPage sourcePage = tryGetPageFromSource.Result; requestCharge += sourcePage.RequestCharge; - responseLengthBytes += sourcePage.ResponseLengthInBytes; - // Note-2024-02-02: - // Here the IndexMetrics headers are non-accumulative, so we are copying that header from the source page. - // Other headers might need similar traeatment, and it's up to the area owner to implement that here. 
- if (sourcePage.AdditionalHeaders.ContainsKey(HttpConstants.HttpHeaders.IndexUtilization)) - { - cumulativeAdditionalHeaders = new Dictionary() {{ HttpConstants.HttpHeaders.IndexUtilization, sourcePage.AdditionalHeaders[HttpConstants.HttpHeaders.IndexUtilization] }}; - } + cumulativeAdditionalHeaders = AccumulateIndexUtilization( + cumulativeHeaders: cumulativeAdditionalHeaders, + currentHeaders: sourcePage.AdditionalHeaders); foreach (CosmosElement element in sourcePage.Documents) { - this.cancellationToken.ThrowIfCancellationRequested(); + cancellationToken.ThrowIfCancellationRequested(); RewrittenAggregateProjections rewrittenAggregateProjections = new RewrittenAggregateProjections( this.isValueQuery, @@ -135,12 +126,12 @@ public override async ValueTask MoveNextAsync(ITrace trace) documents: finalResult, requestCharge: requestCharge, activityId: default, - responseLengthInBytes: responseLengthBytes, cosmosQueryExecutionInfo: default, distributionPlanSpec: default, disallowContinuationTokenMessage: default, additionalHeaders: cumulativeAdditionalHeaders, - state: default); + state: default, + streaming: default); this.Current = TryCatch.FromResult(queryPage); this.returnedFinalPage = true; diff --git a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/Aggregate/AggregateQueryPipelineStage.Compute.cs b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/Aggregate/AggregateQueryPipelineStage.Compute.cs index 8a34e69a8a..fb938b530d 100644 --- a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/Aggregate/AggregateQueryPipelineStage.Compute.cs +++ b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/Aggregate/AggregateQueryPipelineStage.Compute.cs @@ -26,9 +26,8 @@ private sealed class ComputeAggregateQueryPipelineStage : AggregateQueryPipeline private ComputeAggregateQueryPipelineStage( IQueryPipelineStage source, SingleGroupAggregator singleGroupAggregator, - bool isValueAggregateQuery, - CancellationToken cancellationToken) - : base(source, singleGroupAggregator, isValueAggregateQuery, cancellationToken) + bool isValueAggregateQuery) + : base(source, singleGroupAggregator, isValueAggregateQuery) { // all the work is done in the base constructor. 
} @@ -39,11 +38,8 @@ public static TryCatch MonadicCreate( IReadOnlyList orderedAliases, bool hasSelectValue, CosmosElement continuationToken, - CancellationToken cancellationToken, MonadicCreatePipelineStage monadicCreatePipelineStage) { - cancellationToken.ThrowIfCancellationRequested(); - AggregateContinuationToken aggregateContinuationToken; if (continuationToken != null) { @@ -79,7 +75,7 @@ public static TryCatch MonadicCreate( } else { - tryCreateSource = monadicCreatePipelineStage(aggregateContinuationToken.SourceContinuationToken, cancellationToken); + tryCreateSource = monadicCreatePipelineStage(aggregateContinuationToken.SourceContinuationToken); } if (tryCreateSource.Failed) @@ -90,15 +86,14 @@ public static TryCatch MonadicCreate( ComputeAggregateQueryPipelineStage stage = new ComputeAggregateQueryPipelineStage( tryCreateSource.Result, tryCreateSingleGroupAggregator.Result, - hasSelectValue, - cancellationToken); + hasSelectValue); return TryCatch.FromResult(stage); } - public override async ValueTask MoveNextAsync(ITrace trace) + public override async ValueTask MoveNextAsync(ITrace trace, CancellationToken cancellationToken) { - this.cancellationToken.ThrowIfCancellationRequested(); + cancellationToken.ThrowIfCancellationRequested(); if (trace == null) { @@ -113,7 +108,7 @@ public override async ValueTask MoveNextAsync(ITrace trace) // Draining aggregates is broken down into two stages QueryPage queryPage; - if (await this.inputStage.MoveNextAsync(trace)) + if (await this.inputStage.MoveNextAsync(trace, cancellationToken)) { // Stage 1: // Drain the aggregates fully from all continuations and all partitions @@ -128,7 +123,7 @@ public override async ValueTask MoveNextAsync(ITrace trace) QueryPage sourcePage = tryGetSourcePage.Result; foreach (CosmosElement element in sourcePage.Documents) { - this.cancellationToken.ThrowIfCancellationRequested(); + cancellationToken.ThrowIfCancellationRequested(); RewrittenAggregateProjections rewrittenAggregateProjections = new RewrittenAggregateProjections( this.isValueQuery, @@ -144,12 +139,12 @@ public override async ValueTask MoveNextAsync(ITrace trace) documents: EmptyResults, requestCharge: sourcePage.RequestCharge, activityId: sourcePage.ActivityId, - responseLengthInBytes: sourcePage.ResponseLengthInBytes, cosmosQueryExecutionInfo: sourcePage.CosmosQueryExecutionInfo, distributionPlanSpec: default, disallowContinuationTokenMessage: sourcePage.DisallowContinuationTokenMessage, additionalHeaders: sourcePage.AdditionalHeaders, - state: queryState); + state: queryState, + streaming: sourcePage.Streaming); queryPage = emptyPage; } @@ -168,12 +163,12 @@ public override async ValueTask MoveNextAsync(ITrace trace) documents: finalResult, requestCharge: default, activityId: default, - responseLengthInBytes: default, cosmosQueryExecutionInfo: default, distributionPlanSpec: default, disallowContinuationTokenMessage: default, additionalHeaders: default, - state: default); + state: default, + streaming: default); queryPage = finalPage; this.returnedFinalPage = true; diff --git a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/Aggregate/AggregateQueryPipelineStage.cs b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/Aggregate/AggregateQueryPipelineStage.cs index 0146c3b15e..cf681d0d4d 100644 --- a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/Aggregate/AggregateQueryPipelineStage.cs +++ b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/Aggregate/AggregateQueryPipelineStage.cs @@ -46,14 +46,12 @@ internal abstract partial class 
AggregateQueryPipelineStage : QueryPipelineStage /// The source component that will supply the local aggregates from multiple continuations and partitions. /// The single group aggregator that we will feed results into. /// Whether or not the query has the 'VALUE' keyword. - /// The cancellation token for cooperative yeilding. /// This constructor is private since there is some async initialization that needs to happen in CreateAsync(). public AggregateQueryPipelineStage( IQueryPipelineStage source, SingleGroupAggregator singleGroupAggregator, - bool isValueQuery, - CancellationToken cancellationToken) - : base(source, cancellationToken) + bool isValueQuery) + : base(source) { this.singleGroupAggregator = singleGroupAggregator ?? throw new ArgumentNullException(nameof(singleGroupAggregator)); this.isValueQuery = isValueQuery; @@ -66,7 +64,6 @@ public static TryCatch MonadicCreate( IReadOnlyList orderedAliases, bool hasSelectValue, CosmosElement continuationToken, - CancellationToken cancellationToken, MonadicCreatePipelineStage monadicCreatePipelineStage) => executionEnvironment switch { ExecutionEnvironment.Client => ClientAggregateQueryPipelineStage.MonadicCreate( @@ -75,7 +72,6 @@ public static TryCatch MonadicCreate( orderedAliases, hasSelectValue, continuationToken, - cancellationToken, monadicCreatePipelineStage), ExecutionEnvironment.Compute => ComputeAggregateQueryPipelineStage.MonadicCreate( aggregates, @@ -83,7 +79,6 @@ public static TryCatch MonadicCreate( orderedAliases, hasSelectValue, continuationToken, - cancellationToken, monadicCreatePipelineStage), _ => throw new ArgumentException($"Unknown {nameof(ExecutionEnvironment)}: {executionEnvironment}."), }; diff --git a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/CatchAllQueryPipelineStage.cs b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/CatchAllQueryPipelineStage.cs index 827fc1c02c..74c953a93e 100644 --- a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/CatchAllQueryPipelineStage.cs +++ b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/CatchAllQueryPipelineStage.cs @@ -13,12 +13,12 @@ namespace Microsoft.Azure.Cosmos.Query.Core.Pipeline internal sealed class CatchAllQueryPipelineStage : QueryPipelineStageBase { - public CatchAllQueryPipelineStage(IQueryPipelineStage inputStage, CancellationToken cancellationToken) - : base(inputStage, cancellationToken) + public CatchAllQueryPipelineStage(IQueryPipelineStage inputStage) + : base(inputStage) { } - public override async ValueTask MoveNextAsync(ITrace trace) + public override async ValueTask MoveNextAsync(ITrace trace, CancellationToken cancellationToken) { if (trace == null) { @@ -27,7 +27,7 @@ public override async ValueTask MoveNextAsync(ITrace trace) try { - if (!await this.inputStage.MoveNextAsync(trace)) + if (!await this.inputStage.MoveNextAsync(trace, cancellationToken)) { this.Current = default; return false; diff --git a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/CosmosQueryExecutionContextFactory.cs b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/CosmosQueryExecutionContextFactory.cs index e7a65b7457..fdca4ee452 100644 --- a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/CosmosQueryExecutionContextFactory.cs +++ b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/CosmosQueryExecutionContextFactory.cs @@ -19,7 +19,6 @@ namespace Microsoft.Azure.Cosmos.Query.Core.ExecutionContext using Microsoft.Azure.Cosmos.Query.Core.Parser; using Microsoft.Azure.Cosmos.Query.Core.Pipeline; using Microsoft.Azure.Cosmos.Query.Core.Pipeline.CrossPartition.Parallel; - using 
Microsoft.Azure.Cosmos.Query.Core.Pipeline.Distinct; using Microsoft.Azure.Cosmos.Query.Core.Pipeline.OptimisticDirectExecutionQuery; using Microsoft.Azure.Cosmos.Query.Core.Pipeline.Pagination; using Microsoft.Azure.Cosmos.Query.Core.Pipeline.Tokens; @@ -68,18 +67,18 @@ public static IQueryPipelineStage Create( { // Query Iterator requires that the creation of the query context is deferred until the user calls ReadNextAsync AsyncLazy> lazyTryCreateStage = new AsyncLazy>( - valueFactory: (trace, innerCancellationToken) => CosmosQueryExecutionContextFactory.TryCreateCoreContextAsync( + valueFactory: (trace, innerCancellationToken) => TryCreateCoreContextAsync( documentContainer, cosmosQueryContext, inputParameters, trace, innerCancellationToken)); - LazyQueryPipelineStage lazyQueryPipelineStage = new LazyQueryPipelineStage(lazyTryCreateStage: lazyTryCreateStage, cancellationToken: default); + LazyQueryPipelineStage lazyQueryPipelineStage = new LazyQueryPipelineStage(lazyTryCreateStage: lazyTryCreateStage); return lazyQueryPipelineStage; }); - CatchAllQueryPipelineStage catchAllQueryPipelineStage = new CatchAllQueryPipelineStage(nameCacheStaleRetryQueryPipelineStage, cancellationToken: default); + CatchAllQueryPipelineStage catchAllQueryPipelineStage = new CatchAllQueryPipelineStage(nameCacheStaleRetryQueryPipelineStage); return catchAllQueryPipelineStage; } @@ -201,11 +200,10 @@ private static async Task> TryCreateCoreContextAsy forceRefresh: false, createQueryPipelineTrace); - return CosmosQueryExecutionContextFactory.TryCreatePassthroughQueryExecutionContext( + return TryCreatePassthroughQueryExecutionContext( documentContainer, inputParameters, - targetRanges, - cancellationToken); + targetRanges); } } } @@ -240,7 +238,7 @@ private static async Task> TryCreateFromPartitione { cancellationToken.ThrowIfCancellationRequested(); - List targetRanges = await CosmosQueryExecutionContextFactory.GetTargetPartitionKeyRangesAsync( + List targetRanges = await GetTargetPartitionKeyRangesAsync( cosmosQueryContext.QueryClient, cosmosQueryContext.ResourceLink, partitionedQueryExecutionInfo, @@ -291,15 +289,19 @@ private static async Task> TryCreateFromPartitione { SetTestInjectionPipelineType(inputParameters, Passthrough); - tryCreatePipelineStage = CosmosQueryExecutionContextFactory.TryCreatePassthroughQueryExecutionContext( + tryCreatePipelineStage = TryCreatePassthroughQueryExecutionContext( documentContainer, inputParameters, - targetRanges, - cancellationToken); + targetRanges); } else { - tryCreatePipelineStage = TryCreateSpecializedDocumentQueryExecutionContext(documentContainer, cosmosQueryContext, inputParameters, targetRanges, partitionedQueryExecutionInfo, cancellationToken); + tryCreatePipelineStage = TryCreateSpecializedDocumentQueryExecutionContext( + documentContainer, + cosmosQueryContext, + inputParameters, + targetRanges, + partitionedQueryExecutionInfo); } } @@ -330,7 +332,7 @@ private static async Task> TryCreateSinglePartitio // Test code added to confirm the correct pipeline is being utilized SetTestInjectionPipelineType(inputParameters, OptimisticDirectExecution); - TryCatch tryCreatePipelineStage = CosmosQueryExecutionContextFactory.TryCreateOptimisticDirectExecutionContext( + TryCatch tryCreatePipelineStage = TryCreateOptimisticDirectExecutionContext( documentContainer, cosmosQueryContext, containerQueryProperties, @@ -359,8 +361,7 @@ private static async Task> TryCreateSinglePartitio cosmosQueryContext, inputParameters, targetRanges, - partitionedQueryExecutionInfo, - 
cancellationToken); + partitionedQueryExecutionInfo); } else { @@ -382,8 +383,7 @@ private static TryCatch TryCreateSpecializedDocumentQueryEx CosmosQueryContext cosmosQueryContext, InputParameters inputParameters, List targetRanges, - PartitionedQueryExecutionInfo partitionedQueryExecutionInfo, - CancellationToken cancellationToken) + PartitionedQueryExecutionInfo partitionedQueryExecutionInfo) { SetTestInjectionPipelineType(inputParameters, Specialized); @@ -412,13 +412,12 @@ private static TryCatch TryCreateSpecializedDocumentQueryEx inputParameters.TestInjections); } - return CosmosQueryExecutionContextFactory.TryCreateSpecializedDocumentQueryExecutionContext( + return TryCreateSpecializedDocumentQueryExecutionContext( documentContainer, cosmosQueryContext, inputParameters, partitionedQueryExecutionInfo, - targetRanges, - cancellationToken); + targetRanges); } private static async Task> TryCreateSpecializedDocumentQueryExecutionContextAsync( @@ -436,7 +435,7 @@ private static async Task> TryCreateSpecializedDoc trace, cancellationToken); - List targetRanges = await CosmosQueryExecutionContextFactory.GetTargetPartitionKeyRangesAsync( + List targetRanges = await GetTargetPartitionKeyRangesAsync( cosmosQueryContext.QueryClient, cosmosQueryContext.ResourceLink, partitionedQueryExecutionInfo, @@ -450,8 +449,7 @@ private static async Task> TryCreateSpecializedDoc cosmosQueryContext, inputParameters, targetRanges, - partitionedQueryExecutionInfo, - cancellationToken); + partitionedQueryExecutionInfo); } private static TryCatch TryCreateOptimisticDirectExecutionContext( @@ -467,28 +465,22 @@ private static TryCatch TryCreateOptimisticDirectExecutionC documentContainer: documentContainer, inputParameters: inputParameters, targetRange: new FeedRangeEpk(targetRange.ToRange()), - fallbackQueryPipelineStageFactory: (continuationToken) => - { - // In fallback scenario, the Specialized pipeline is always invoked - Task> tryCreateContext = - CosmosQueryExecutionContextFactory.TryCreateSpecializedDocumentQueryExecutionContextAsync( - documentContainer, - cosmosQueryContext, - containerQueryProperties, - inputParameters.WithContinuationToken(continuationToken), - NoOpTrace.Singleton, - default); - - return tryCreateContext; - }, + fallbackQueryPipelineStageFactory: (continuationToken) => + // In fallback scenario, the Specialized pipeline is always invoked + TryCreateSpecializedDocumentQueryExecutionContextAsync( + documentContainer, + cosmosQueryContext, + containerQueryProperties, + inputParameters.WithContinuationToken(continuationToken), + NoOpTrace.Singleton, + cancellationToken), cancellationToken: cancellationToken); } private static TryCatch TryCreatePassthroughQueryExecutionContext( DocumentContainer documentContainer, InputParameters inputParameters, - List targetRanges, - CancellationToken cancellationToken) + List targetRanges) { // Return a parallel context, since we still want to be able to handle splits and concurrency / buffering. 
return ParallelCrossPartitionQueryPipelineStage.MonadicCreate( @@ -507,7 +499,6 @@ private static TryCatch TryCreatePassthroughQueryExecutionC partitionKey: inputParameters.PartitionKey, prefetchPolicy: PrefetchPolicy.PrefetchSinglePage, maxConcurrency: inputParameters.MaxConcurrency, - cancellationToken: cancellationToken, continuationToken: inputParameters.InitialUserContinuationToken); } @@ -516,8 +507,7 @@ private static TryCatch TryCreateSpecializedDocumentQueryEx CosmosQueryContext cosmosQueryContext, InputParameters inputParameters, PartitionedQueryExecutionInfo partitionedQueryExecutionInfo, - List targetRanges, - CancellationToken cancellationToken) + List targetRanges) { QueryInfo queryInfo = partitionedQueryExecutionInfo.QueryInfo; @@ -577,8 +567,7 @@ private static TryCatch TryCreateSpecializedDocumentQueryEx queryPaginationOptions: new QueryPaginationOptions( pageSizeHint: (int)optimalPageSize), maxConcurrency: inputParameters.MaxConcurrency, - requestContinuationToken: inputParameters.InitialUserContinuationToken, - requestCancellationToken: cancellationToken); + requestContinuationToken: inputParameters.InitialUserContinuationToken); } private static async Task GetPartitionedQueryExecutionInfoAsync( @@ -784,7 +773,7 @@ private static Documents.PartitionKeyDefinition GetPartitionKeyDefinition(InputP List targetRanges; if (partitionedQueryExecutionInfo != null || inputParameters.InitialFeedRange != null) { - targetRanges = await CosmosQueryExecutionContextFactory.GetTargetPartitionKeyRangesAsync( + targetRanges = await GetTargetPartitionKeyRangesAsync( cosmosQueryContext.QueryClient, cosmosQueryContext.ResourceLink, partitionedQueryExecutionInfo, diff --git a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/CrossPartition/OrderBy/OrderByCrossPartitionEnumerator.cs b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/CrossPartition/OrderBy/OrderByCrossPartitionEnumerator.cs new file mode 100644 index 0000000000..968b1e1c84 --- /dev/null +++ b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/CrossPartition/OrderBy/OrderByCrossPartitionEnumerator.cs @@ -0,0 +1,206 @@ +// ------------------------------------------------------------ +// Copyright (c) Microsoft Corporation. All rights reserved. +// ------------------------------------------------------------ + +namespace Microsoft.Azure.Cosmos.Pagination +{ + using System; + using System.Collections; + using System.Collections.Generic; + using System.Threading; + using System.Threading.Tasks; + using Microsoft.Azure.Cosmos.CosmosElements; + using Microsoft.Azure.Cosmos.Query.Core.Collections; + using Microsoft.Azure.Cosmos.Query.Core.Monads; + using Microsoft.Azure.Cosmos.Query.Core.Pipeline.CrossPartition.OrderBy; + using Microsoft.Azure.Cosmos.Tracing; + + internal sealed class OrderByCrossPartitionEnumerator : IEnumerator + { + private readonly PriorityQueue> queue; + + private bool started; + + public OrderByQueryResult Current => this.queue.Peek().Current; + + object IEnumerator.Current => this.Current; + + public OrderByCrossPartitionEnumerator(PriorityQueue> queue) + { + this.queue = queue ?? 
throw new ArgumentNullException(nameof(queue)); + } + + public static async Task<(IEnumerator orderbyQueryResultEnumerator, double totalRequestCharge)> CreateAsync( + IEnumerable enumerators, + IComparer comparer, + int levelSize, + ITrace trace, + CancellationToken cancellationToken) + { + if (enumerators == null) + { + throw new ArgumentNullException(nameof(enumerators)); + } + + if (comparer == null) + { + throw new ArgumentNullException(nameof(comparer)); + } + + double totalRequestCharge = 0; + EnumeratorComparer enumeratorComparer = new EnumeratorComparer(comparer); + PriorityQueue> queue = new PriorityQueue>(enumeratorComparer); + foreach (ITracingAsyncEnumerator> enumerator in enumerators) + { + while (await enumerator.MoveNextAsync(trace, cancellationToken)) + { + TryCatch currentPage = enumerator.Current; + if (currentPage.Failed) + { + throw currentPage.Exception; + } + + totalRequestCharge += currentPage.Result.RequestCharge; + IReadOnlyList page = currentPage.Result.Page.Documents; + + if (page.Count > 0) + { + PageEnumerator pageEnumerator = new PageEnumerator(page); + pageEnumerator.MoveNext(); + + queue.Enqueue(pageEnumerator); + + if (queue.Count >= levelSize) + { + OrderByCrossPartitionEnumerator newEnumerator = new OrderByCrossPartitionEnumerator(queue); + newEnumerator.MoveNext(); + + queue = new PriorityQueue>(enumeratorComparer); + queue.Enqueue(newEnumerator); + } + } + } + } + + if (queue.Count == 0) + { + return (EmptyEnumerator.Instance, totalRequestCharge); + } + + return (new OrderByCrossPartitionEnumerator(queue), totalRequestCharge); + } + + public bool MoveNext() + { + if (this.queue.Count == 0) + { + return false; + } + + if (!this.started) + { + // We never start empty + this.started = true; + return true; + } + + IEnumerator enumerator = this.queue.Dequeue(); + if (enumerator.MoveNext()) + { + this.queue.Enqueue(enumerator); + } + + return this.queue.Count > 0; + } + + public void Reset() + { + throw new NotSupportedException(); + } + + public void Dispose() + { + while (this.queue.Count > 0) + { + IEnumerator enumerator = this.queue.Dequeue(); + enumerator.Dispose(); + } + } + + private sealed class EmptyEnumerator : IEnumerator + { + public static readonly EmptyEnumerator Instance = new EmptyEnumerator(); + + public OrderByQueryResult Current => throw new InvalidOperationException(); + + object IEnumerator.Current => this.Current; + + private EmptyEnumerator() + { + } + + public bool MoveNext() + { + return false; + } + + public void Reset() + { + } + + public void Dispose() + { + } + } + + private sealed class EnumeratorComparer : IComparer> + { + private readonly IComparer comparer; + + public EnumeratorComparer(IComparer comparer) + { + this.comparer = comparer ?? throw new ArgumentNullException(nameof(comparer)); + } + + public int Compare(IEnumerator x, IEnumerator y) + { + return this.comparer.Compare(x.Current, y.Current); + } + } + + private sealed class PageEnumerator : IEnumerator + { + private readonly IEnumerator enumerator; + + public OrderByQueryResult Current { get; private set; } + + object IEnumerator.Current => this.Current; + + public PageEnumerator(IReadOnlyList page) + { + this.enumerator = page?.GetEnumerator() ?? 
throw new ArgumentNullException(nameof(page)); + } + + public bool MoveNext() + { + if (this.enumerator.MoveNext()) + { + this.Current = new OrderByQueryResult(this.enumerator.Current); + return true; + } + + return false; + } + + public void Reset() + { + this.enumerator.Reset(); + } + + public void Dispose() + { + this.enumerator.Dispose(); + } + } + } +} \ No newline at end of file diff --git a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/CrossPartition/OrderBy/OrderByCrossPartitionQueryPipelineStage.cs b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/CrossPartition/OrderBy/OrderByCrossPartitionQueryPipelineStage.cs index f22eb8a6af..edd2fa7be7 100644 --- a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/CrossPartition/OrderBy/OrderByCrossPartitionQueryPipelineStage.cs +++ b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/CrossPartition/OrderBy/OrderByCrossPartitionQueryPipelineStage.cs @@ -1,1540 +1,2015 @@ -// ------------------------------------------------------------ -// Copyright (c) Microsoft Corporation. All rights reserved. -// ------------------------------------------------------------ - -namespace Microsoft.Azure.Cosmos.Query.Core.Pipeline.CrossPartition.OrderBy -{ - using System; - using System.Collections.Generic; - using System.Linq; - using System.Linq.Expressions; - using System.Net; - using System.Text; - using System.Threading; - using System.Threading.Tasks; - using Microsoft.Azure.Cosmos.CosmosElements; - using Microsoft.Azure.Cosmos.CosmosElements.Numbers; - using Microsoft.Azure.Cosmos.Pagination; - using Microsoft.Azure.Cosmos.Query.Core.Collections; - using Microsoft.Azure.Cosmos.Query.Core.Exceptions; - using Microsoft.Azure.Cosmos.Query.Core.Monads; - using Microsoft.Azure.Cosmos.Query.Core.Pipeline.CrossPartition.Parallel; - using Microsoft.Azure.Cosmos.Query.Core.Pipeline.Pagination; - using Microsoft.Azure.Cosmos.Query.Core.QueryClient; - using Microsoft.Azure.Cosmos.Tracing; - using ResourceId = Documents.ResourceId; - - /// - /// CosmosOrderByItemQueryExecutionContext is a concrete implementation for CrossPartitionQueryExecutionContext. - /// This class is responsible for draining cross partition queries that have order by conditions. - /// The way order by queries work is that they are doing a k-way merge of sorted lists from each partition with an added condition. - /// The added condition is that if 2 or more top documents from different partitions are equivalent then we drain from the left most partition first. - /// This way we can generate a single continuation token for all n partitions. - /// This class is able to stop and resume execution by generating continuation tokens and reconstructing an execution context from said token. - /// - internal sealed class OrderByCrossPartitionQueryPipelineStage : IQueryPipelineStage - { - /// - /// Order by queries are rewritten to allow us to inject a filter. - /// This placeholder is so that we can just string replace it with the filter we want without having to understand the structure of the query. 
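// A minimal sketch of the k-way merge idea behind OrderByCrossPartitionEnumerator above:
// keep one enumerator per sorted source in a priority queue keyed on its current head,
// always emit the smallest head, advance that source, and re-enqueue it. This sketch
// assumes .NET 6's System.Collections.Generic.PriorityQueue<TElement, TPriority>; the
// SDK code uses its own PriorityQueue and an OrderByQueryResult comparer instead.
using System.Collections.Generic;

internal static class KWayMergeSketch
{
    public static IEnumerable<T> Merge<T>(IEnumerable<IEnumerable<T>> sortedSources, IComparer<T> comparer)
    {
        // Seed the queue with one enumerator per non-empty source, prioritized by its head item.
        PriorityQueue<IEnumerator<T>, T> queue = new PriorityQueue<IEnumerator<T>, T>(comparer);
        foreach (IEnumerable<T> source in sortedSources)
        {
            IEnumerator<T> enumerator = source.GetEnumerator();
            if (enumerator.MoveNext())
            {
                queue.Enqueue(enumerator, enumerator.Current);
            }
        }

        // Repeatedly surface the globally smallest head, advance its source, and put it back.
        while (queue.Count > 0)
        {
            IEnumerator<T> smallest = queue.Dequeue();
            yield return smallest.Current;
            if (smallest.MoveNext())
            {
                queue.Enqueue(smallest, smallest.Current);
            }
        }
    }
}
// Example: Merge(new[] { new[] { 1, 4 }, new[] { 2, 3 } }, Comparer<int>.Default) yields 1, 2, 3, 4.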
- /// - private const string FormatPlaceHolder = "{documentdb-formattableorderbyquery-filter}"; - - /// - /// If query does not need a filter then we replace the FormatPlaceHolder with "true", since - /// "SELECT * FROM c WHERE blah and true" is the same as "SELECT * FROM c where blah" - /// - private const string TrueFilter = "true"; - - private static readonly QueryState InitializingQueryState = new QueryState(CosmosString.Create("ORDER BY NOT INITIALIZED YET!")); - private static readonly IReadOnlyList EmptyPage = new List(); - - private readonly IDocumentContainer documentContainer; - private readonly IReadOnlyList sortOrders; - private readonly PriorityQueue enumerators; - private readonly Queue<(OrderByQueryPartitionRangePageAsyncEnumerator enumerator, OrderByContinuationToken token)> uninitializedEnumeratorsAndTokens; - private readonly QueryPaginationOptions queryPaginationOptions; - private readonly int maxConcurrency; - - private CancellationToken cancellationToken; - private QueryState state; - private bool returnedFinalPage; - - private static class Expressions - { - public const string LessThan = "<"; - public const string LessThanOrEqualTo = "<="; - public const string EqualTo = "="; - public const string GreaterThan = ">"; - public const string GreaterThanOrEqualTo = ">="; - public const string True = "true"; - public const string False = "false"; - } - - private OrderByCrossPartitionQueryPipelineStage( - IDocumentContainer documentContainer, - IReadOnlyList sortOrders, - QueryPaginationOptions queryPaginationOptions, - int maxConcurrency, - IEnumerable<(OrderByQueryPartitionRangePageAsyncEnumerator, OrderByContinuationToken)> uninitializedEnumeratorsAndTokens, - QueryState state, - CancellationToken cancellationToken) - { - this.documentContainer = documentContainer ?? throw new ArgumentNullException(nameof(documentContainer)); - this.sortOrders = sortOrders ?? throw new ArgumentNullException(nameof(sortOrders)); - this.enumerators = new PriorityQueue(new OrderByEnumeratorComparer(this.sortOrders)); - this.queryPaginationOptions = queryPaginationOptions ?? QueryPaginationOptions.Default; - this.maxConcurrency = maxConcurrency < 0 ? throw new ArgumentOutOfRangeException($"{nameof(maxConcurrency)} must be a non negative number.") : maxConcurrency; - this.uninitializedEnumeratorsAndTokens = new Queue<(OrderByQueryPartitionRangePageAsyncEnumerator, OrderByContinuationToken)>(uninitializedEnumeratorsAndTokens ?? throw new ArgumentNullException(nameof(uninitializedEnumeratorsAndTokens))); - this.state = state ?? 
InitializingQueryState; - this.cancellationToken = cancellationToken; - } - - public TryCatch Current { get; private set; } - - public ValueTask DisposeAsync() => default; - - private async ValueTask MoveNextAsync_Initialize_FromBeginningAsync( - OrderByQueryPartitionRangePageAsyncEnumerator uninitializedEnumerator, - ITrace trace) - { - this.cancellationToken.ThrowIfCancellationRequested(); - - if (uninitializedEnumerator == null) - { - throw new ArgumentNullException(nameof(uninitializedEnumerator)); - } - - // We need to prime the page - if (!await uninitializedEnumerator.MoveNextAsync(trace)) - { - // No more documents, so just return an empty page - this.Current = TryCatch.FromResult( - new QueryPage( - documents: EmptyPage, - requestCharge: 0, - activityId: string.Empty, - responseLengthInBytes: 0, - cosmosQueryExecutionInfo: default, - distributionPlanSpec: default, - disallowContinuationTokenMessage: default, - additionalHeaders: default, - state: this.state)); - return true; - } - - if (uninitializedEnumerator.Current.Failed) - { - if (IsSplitException(uninitializedEnumerator.Current.Exception)) - { - return await this.MoveNextAsync_InitializeAsync_HandleSplitAsync(uninitializedEnumerator, token: null, trace); - } - - this.uninitializedEnumeratorsAndTokens.Enqueue((uninitializedEnumerator, token: null)); - this.Current = TryCatch.FromException(uninitializedEnumerator.Current.Exception); - } - else - { - QueryPage page = uninitializedEnumerator.Current.Result.Page; - - if (!uninitializedEnumerator.Current.Result.Enumerator.MoveNext()) - { - // Page was empty - if (uninitializedEnumerator.FeedRangeState.State != null) - { - this.uninitializedEnumeratorsAndTokens.Enqueue((uninitializedEnumerator, token: null)); - } - - if ((this.uninitializedEnumeratorsAndTokens.Count == 0) && (this.enumerators.Count == 0)) - { - // Query did not match any results. We need to emit a fake empty page with null continuation - this.Current = TryCatch.FromResult( - new QueryPage( - documents: EmptyPage, - requestCharge: page.RequestCharge, - activityId: string.IsNullOrEmpty(page.ActivityId) ? 
Guid.NewGuid().ToString() : page.ActivityId, - responseLengthInBytes: page.ResponseLengthInBytes, - cosmosQueryExecutionInfo: page.CosmosQueryExecutionInfo, - distributionPlanSpec: default, - disallowContinuationTokenMessage: page.DisallowContinuationTokenMessage, - additionalHeaders: page.AdditionalHeaders, - state: null)); - this.returnedFinalPage = true; - return true; - } - } - else - { - this.enumerators.Enqueue(uninitializedEnumerator); - } - - // Just return an empty page with the stats - this.Current = TryCatch.FromResult( - new QueryPage( - documents: EmptyPage, - requestCharge: page.RequestCharge, - activityId: page.ActivityId, - responseLengthInBytes: page.ResponseLengthInBytes, - cosmosQueryExecutionInfo: page.CosmosQueryExecutionInfo, - distributionPlanSpec: default, - disallowContinuationTokenMessage: page.DisallowContinuationTokenMessage, - additionalHeaders: page.AdditionalHeaders, - state: this.state)); - } - - return true; - } - - private async ValueTask MoveNextAsync_Initialize_FilterAsync( - OrderByQueryPartitionRangePageAsyncEnumerator uninitializedEnumerator, - OrderByContinuationToken token, - ITrace trace) - { - this.cancellationToken.ThrowIfCancellationRequested(); - - if (uninitializedEnumerator == null) - { - throw new ArgumentNullException(nameof(uninitializedEnumerator)); - } - - if (token == null) - { - throw new ArgumentNullException(nameof(token)); - } - - TryCatch<(bool, int, TryCatch)> filterMonad = await FilterNextAsync( - uninitializedEnumerator, - this.sortOrders, - token, - trace, - cancellationToken: default); - - if (filterMonad.Failed) - { - if (IsSplitException(filterMonad.Exception)) - { - return await this.MoveNextAsync_InitializeAsync_HandleSplitAsync(uninitializedEnumerator, token, trace); - } - - this.Current = TryCatch.FromException(filterMonad.Exception); - return true; - } - - (bool doneFiltering, int itemsLeftToSkip, TryCatch monadicQueryByPage) = filterMonad.Result; - QueryPage page = uninitializedEnumerator.Current.Result.Page; - if (doneFiltering) - { - if (uninitializedEnumerator.Current.Result.Enumerator.Current != null) - { - this.enumerators.Enqueue(uninitializedEnumerator); - } - else if ((this.uninitializedEnumeratorsAndTokens.Count == 0) && (this.enumerators.Count == 0)) - { - // Query did not match any results. - // We need to emit a fake empty page with null continuation - this.Current = TryCatch.FromResult( - new QueryPage( - documents: EmptyPage, - requestCharge: page.RequestCharge, - activityId: string.IsNullOrEmpty(page.ActivityId) ? 
Guid.NewGuid().ToString() : page.ActivityId, - responseLengthInBytes: page.ResponseLengthInBytes, - cosmosQueryExecutionInfo: page.CosmosQueryExecutionInfo, - distributionPlanSpec: default, - disallowContinuationTokenMessage: page.DisallowContinuationTokenMessage, - additionalHeaders: page.AdditionalHeaders, - state: null)); - this.returnedFinalPage = true; - return true; - } - } - else - { - if (monadicQueryByPage.Failed) - { - if (IsSplitException(filterMonad.Exception)) - { - return await this.MoveNextAsync_InitializeAsync_HandleSplitAsync(uninitializedEnumerator, token, trace); - } - } - - if (uninitializedEnumerator.FeedRangeState.State != default) - { - // We need to update the token - OrderByContinuationToken modifiedToken = new OrderByContinuationToken( - new ParallelContinuationToken( - ((CosmosString)uninitializedEnumerator.FeedRangeState.State.Value).Value, - ((FeedRangeEpk)uninitializedEnumerator.FeedRangeState.FeedRange).Range), - token.OrderByItems, - token.ResumeValues, - token.Rid, - itemsLeftToSkip, - token.Filter); - this.uninitializedEnumeratorsAndTokens.Enqueue((uninitializedEnumerator, modifiedToken)); - CosmosElement cosmosElementOrderByContinuationToken = OrderByContinuationToken.ToCosmosElement(modifiedToken); - CosmosArray continuationTokenList = CosmosArray.Create(new List() { cosmosElementOrderByContinuationToken }); - this.state = new QueryState(continuationTokenList); - } - } - - // Just return an empty page with the stats - this.Current = TryCatch.FromResult( - new QueryPage( - documents: EmptyPage, - requestCharge: page.RequestCharge, - activityId: page.ActivityId, - responseLengthInBytes: page.ResponseLengthInBytes, - cosmosQueryExecutionInfo: page.CosmosQueryExecutionInfo, - distributionPlanSpec: default, - disallowContinuationTokenMessage: page.DisallowContinuationTokenMessage, - additionalHeaders: page.AdditionalHeaders, - state: InitializingQueryState)); - - return true; - } - - private async ValueTask MoveNextAsync_InitializeAsync_HandleSplitAsync( - OrderByQueryPartitionRangePageAsyncEnumerator uninitializedEnumerator, - OrderByContinuationToken token, - ITrace trace) - { - this.cancellationToken.ThrowIfCancellationRequested(); - - IReadOnlyList childRanges = await this.documentContainer.GetChildRangeAsync( - uninitializedEnumerator.FeedRangeState.FeedRange, - trace, - this.cancellationToken); - - if (childRanges.Count <= 1) - { - // We optimistically assumed that the cache is not stale. 
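// A minimal sketch, under simplified types, of the split handling in this stage: when a partition
// is gone (410/1002), re-resolve the feed range into child ranges, refreshing the routing cache
// once if only a single child comes back (the cache may be stale), and then create one child
// enumerator per range carrying the parent's continuation token, as the surrounding code does.
// IRangeResolver and the string-based ranges here are hypothetical stand-ins, not SDK types.
using System.Collections.Generic;
using System.Threading;
using System.Threading.Tasks;

internal interface IRangeResolver
{
    Task<IReadOnlyList<string>> GetChildRangesAsync(string parentRange, CancellationToken cancellationToken);

    Task RefreshAsync(CancellationToken cancellationToken);
}

internal static class SplitHandlingSketch
{
    public static async Task<IReadOnlyList<string>> ResolveChildRangesAsync(
        IRangeResolver resolver,
        string parentRange,
        CancellationToken cancellationToken)
    {
        IReadOnlyList<string> childRanges = await resolver.GetChildRangesAsync(parentRange, cancellationToken);
        if (childRanges.Count <= 1)
        {
            // The optimistic read came back with a single range, so the cache may be stale: refresh and retry once.
            await resolver.RefreshAsync(cancellationToken);
            childRanges = await resolver.GetChildRangesAsync(parentRange, cancellationToken);
        }

        return childRanges;
    }
}
// Each returned range would then get its own uninitialized enumerator, seeded with the parent's token.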
- // In the event that it is (where we only get back one child / the partition that we think got split) - // Then we need to refresh the cache - await this.documentContainer.RefreshProviderAsync(trace, this.cancellationToken); - childRanges = await this.documentContainer.GetChildRangeAsync( - uninitializedEnumerator.FeedRangeState.FeedRange, - trace, - this.cancellationToken); - } - - if (childRanges.Count < 1) - { - string errorMessage = "SDK invariant violated 82086B2D: Must have at least one EPK range in a cross partition enumerator"; - throw Resource.CosmosExceptions.CosmosExceptionFactory.CreateInternalServerErrorException( - message: errorMessage, - headers: null, - stackTrace: null, - trace: trace, - error: new Microsoft.Azure.Documents.Error { Code = "SDK_invariant_violated_82086B2D", Message = errorMessage }); - } - - if (childRanges.Count == 1) - { - // On a merge, the 410/1002 results in a single parent - // We maintain the current enumerator's range and let the RequestInvokerHandler logic kick in - OrderByQueryPartitionRangePageAsyncEnumerator childPaginator = new OrderByQueryPartitionRangePageAsyncEnumerator( - this.documentContainer, - uninitializedEnumerator.SqlQuerySpec, - new FeedRangeState(uninitializedEnumerator.FeedRangeState.FeedRange, uninitializedEnumerator.StartOfPageState), - partitionKey: null, - uninitializedEnumerator.QueryPaginationOptions, - uninitializedEnumerator.Filter, - this.cancellationToken); - this.uninitializedEnumeratorsAndTokens.Enqueue((childPaginator, token)); - } - else - { - // Split - foreach (FeedRangeInternal childRange in childRanges) - { - this.cancellationToken.ThrowIfCancellationRequested(); - - OrderByQueryPartitionRangePageAsyncEnumerator childPaginator = new OrderByQueryPartitionRangePageAsyncEnumerator( - this.documentContainer, - uninitializedEnumerator.SqlQuerySpec, - new FeedRangeState(childRange, uninitializedEnumerator.StartOfPageState), - partitionKey: null, - uninitializedEnumerator.QueryPaginationOptions, - uninitializedEnumerator.Filter, - this.cancellationToken); - this.uninitializedEnumeratorsAndTokens.Enqueue((childPaginator, token)); - } - } - - // Recursively retry - return await this.MoveNextAsync(trace); - } - - private async ValueTask MoveNextAsync_InitializeAsync(ITrace trace) - { - this.cancellationToken.ThrowIfCancellationRequested(); - - await ParallelPrefetch.PrefetchInParallelAsync( - this.uninitializedEnumeratorsAndTokens.Select(value => value.enumerator), - this.maxConcurrency, - trace, - this.cancellationToken); - (OrderByQueryPartitionRangePageAsyncEnumerator uninitializedEnumerator, OrderByContinuationToken token) = this.uninitializedEnumeratorsAndTokens.Dequeue(); - bool movedNext = token is null - ? await this.MoveNextAsync_Initialize_FromBeginningAsync(uninitializedEnumerator, trace) - : await this.MoveNextAsync_Initialize_FilterAsync(uninitializedEnumerator, token, trace); - return movedNext; - } - - private ValueTask MoveNextAsync_DrainPageAsync(ITrace trace) - { - this.cancellationToken.ThrowIfCancellationRequested(); - - if (trace == null) - { - throw new ArgumentNullException(nameof(trace)); - } - - OrderByQueryPartitionRangePageAsyncEnumerator currentEnumerator = default; - OrderByQueryResult orderByQueryResult = default; - - // Try to form a page with as many items in the sorted order without having to do async work. 
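// A minimal sketch of the synchronous drain loop that follows: dequeue the enumerator with the
// smallest buffered item, record that item, advance the enumerator, and either re-enqueue it or
// cut the page when its buffer runs dry (the real stage parks it for an async refill and emits a
// continuation token at that point). Assumes .NET 6's PriorityQueue; the SDK uses its own type.
using System.Collections.Generic;

internal static class DrainPageSketch
{
    public static List<T> DrainPage<T>(PriorityQueue<IEnumerator<T>, T> queue, int pageSizeLimit)
    {
        List<T> results = new List<T>();
        while (results.Count < pageSizeLimit && queue.Count > 0)
        {
            IEnumerator<T> current = queue.Dequeue();
            results.Add(current.Current);

            if (!current.MoveNext())
            {
                // This source has no more buffered items; stop the page here so the caller can
                // refill it asynchronously and hand back a continuation token.
                break;
            }

            // Still has buffered items: put it back keyed on its new head.
            queue.Enqueue(current, current.Current);
        }

        return results;
    }
}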
- List results = new List(); - while (results.Count < this.queryPaginationOptions.PageSizeLimit.GetValueOrDefault(int.MaxValue)) - { - currentEnumerator = this.enumerators.Dequeue(); - orderByQueryResult = new OrderByQueryResult(currentEnumerator.Current.Result.Enumerator.Current); - results.Add(orderByQueryResult); - - if (!currentEnumerator.Current.Result.Enumerator.MoveNext()) - { - // The order by page ran out of results - if (currentEnumerator.FeedRangeState.State != null) - { - // If the continuation isn't null - // then mark the enumerator as unitialized and it will get requeueed on the next iteration with a fresh page. - this.uninitializedEnumeratorsAndTokens.Enqueue((currentEnumerator, (OrderByContinuationToken)null)); - - // Use the token for the next page, since we fully drained the enumerator. - OrderByContinuationToken orderByContinuationToken = CreateOrderByContinuationToken( - new ParallelContinuationToken( - token: ((CosmosString)currentEnumerator.FeedRangeState.State.Value).Value, - range: ((FeedRangeEpk)currentEnumerator.FeedRangeState.FeedRange).Range), - orderByQueryResult, - skipCount: 0, - filter: currentEnumerator.Filter); - - CosmosElement cosmosElementOrderByContinuationToken = OrderByContinuationToken.ToCosmosElement(orderByContinuationToken); - CosmosArray continuationTokenList = CosmosArray.Create(new List() { cosmosElementOrderByContinuationToken }); - - this.state = new QueryState(continuationTokenList); - - // Return a page of results - // No stats to report, since we already reported it when we moved to this page. - this.Current = TryCatch.FromResult( - new QueryPage( - documents: results.Select(result => result.Payload).ToList(), - requestCharge: 0, - activityId: default, - responseLengthInBytes: 0, - cosmosQueryExecutionInfo: default, - distributionPlanSpec: default, - disallowContinuationTokenMessage: default, - additionalHeaders: currentEnumerator.Current.Result.Page.AdditionalHeaders, - state: this.state)); - return new ValueTask(true); - } - - // Todo: we can optimize this by having a special "Done" continuation token - // so we don't grab a full page and filter it through - // but this would break older clients, so wait for a compute only fork. - - break; - } - - this.enumerators.Enqueue(currentEnumerator); - } - - // It is possible that we emit multiple documents with the same rid due to JOIN queries. - // This means it is not enough to serialize the rid that we left on to resume the query. - // We need to also serialize the number of documents with that rid, so we can skip it when resuming - int skipCount = results.Where(result => string.Equals(result.Rid, orderByQueryResult.Rid)).Count(); - - // Create the continuation token. - CosmosElement state; - if ((this.enumerators.Count == 0) && (this.uninitializedEnumeratorsAndTokens.Count == 0)) - { - state = null; - } - else - { - OrderByContinuationToken orderByContinuationToken = CreateOrderByContinuationToken( - new ParallelContinuationToken( - token: currentEnumerator.StartOfPageState != null ? 
((CosmosString)currentEnumerator.StartOfPageState.Value).Value : null, - range: ((FeedRangeEpk)currentEnumerator.FeedRangeState.FeedRange).Range), - orderByQueryResult, - skipCount, - currentEnumerator.Filter); - - CosmosElement cosmosElementOrderByContinuationToken = OrderByContinuationToken.ToCosmosElement(orderByContinuationToken); - CosmosArray continuationTokenList = CosmosArray.Create(new List() { cosmosElementOrderByContinuationToken }); - - state = continuationTokenList; - } - - this.state = state != null ? new QueryState(state) : null; - - // Return a page of results - // No stats to report, since we already reported it when we moved to this page. - this.Current = TryCatch.FromResult( - new QueryPage( - documents: results.Select(result => result.Payload).ToList(), - requestCharge: 0, - activityId: default, - responseLengthInBytes: 0, - cosmosQueryExecutionInfo: default, - distributionPlanSpec: default, - disallowContinuationTokenMessage: default, - additionalHeaders: currentEnumerator?.Current.Result.Page.AdditionalHeaders, - state: this.state)); - - if (state == null) - { - this.returnedFinalPage = true; - } - - return new ValueTask(true); - } - - //// In order to maintain the continuation token for the user we must drain with a few constraints - //// 1) We always drain from the partition, which has the highest priority item first - //// 2) If multiple partitions have the same priority item then we drain from the left most first - //// otherwise we would need to keep track of how many of each item we drained from each partition - //// (just like parallel queries). - //// Visually that look the following case where we have three partitions that are numbered and store letters. - //// For teaching purposes I have made each item a tuple of the following form: - //// - //// So that duplicates across partitions are distinct, but duplicates within partitions are indistinguishable. - //// |-------| |-------| |-------| - //// | | | | | | - //// | | | | | | - //// | | | | | | - //// | | | | | | - //// | | | | | | - //// | | | | | | - //// | | | | | | - //// |-------| |-------| |-------| - //// Now the correct drain order in this case is: - //// ,,,,,,,,,,, - //// ,,,,,,,,, - //// In more mathematical terms - //// 1) always comes before where x < z - //// 2) always come before where j < k - public ValueTask MoveNextAsync(ITrace trace) - { - this.cancellationToken.ThrowIfCancellationRequested(); - - if (trace == null) - { - throw new ArgumentNullException(nameof(trace)); - } - - if (this.uninitializedEnumeratorsAndTokens.Count != 0) - { - return this.MoveNextAsync_InitializeAsync(trace); - } - - if (this.enumerators.Count == 0) - { - if (!this.returnedFinalPage) - { - // return a empty page with null continuation token - this.Current = TryCatch.FromResult( - new QueryPage( - documents: EmptyPage, - requestCharge: 0, - activityId: Guid.NewGuid().ToString(), - responseLengthInBytes: 0, - cosmosQueryExecutionInfo: default, - distributionPlanSpec: default, - disallowContinuationTokenMessage: default, - additionalHeaders: default, - state: null)); - this.returnedFinalPage = true; - return new ValueTask(true); - } - - // Finished draining. - return new ValueTask(false); - } - - return this.MoveNextAsync_DrainPageAsync(trace); - } - - public static TryCatch MonadicCreate( - IDocumentContainer documentContainer, - SqlQuerySpec sqlQuerySpec, - IReadOnlyList targetRanges, - Cosmos.PartitionKey? 
partitionKey, - IReadOnlyList orderByColumns, - QueryPaginationOptions queryPaginationOptions, - int maxConcurrency, - CosmosElement continuationToken, - CancellationToken cancellationToken) - { - // TODO (brchon): For now we are not honoring non deterministic ORDER BY queries, since there is a bug in the continuation logic. - // We can turn it back on once the bug is fixed. - // This shouldn't hurt any query results. - - if (documentContainer == null) - { - throw new ArgumentNullException(nameof(documentContainer)); - } - - if (sqlQuerySpec == null) - { - throw new ArgumentNullException(nameof(sqlQuerySpec)); - } - - if (targetRanges == null) - { - throw new ArgumentNullException(nameof(targetRanges)); - } - - if (targetRanges.Count == 0) - { - throw new ArgumentException($"{nameof(targetRanges)} must not be empty."); - } - - if (orderByColumns == null) - { - throw new ArgumentNullException(nameof(orderByColumns)); - } - - if (orderByColumns.Count == 0) - { - throw new ArgumentException($"{nameof(orderByColumns)} must not be empty."); - } - - List<(OrderByQueryPartitionRangePageAsyncEnumerator, OrderByContinuationToken)> enumeratorsAndTokens; - if (continuationToken == null) - { - // Start off all the partition key ranges with null continuation - SqlQuerySpec rewrittenQueryForOrderBy = new SqlQuerySpec( - sqlQuerySpec.QueryText.Replace(oldValue: FormatPlaceHolder, newValue: TrueFilter), - sqlQuerySpec.Parameters); - - enumeratorsAndTokens = targetRanges - .Select(range => (new OrderByQueryPartitionRangePageAsyncEnumerator( - documentContainer, - rewrittenQueryForOrderBy, - new FeedRangeState(range, state: default), - partitionKey, - queryPaginationOptions, - TrueFilter, - cancellationToken), (OrderByContinuationToken)null)) - .ToList(); - } - else - { - TryCatch> monadicGetOrderByContinuationTokenMapping = MonadicGetOrderByContinuationTokenMapping( - targetRanges, - continuationToken, - orderByColumns.Count); - if (monadicGetOrderByContinuationTokenMapping.Failed) - { - return TryCatch.FromException(monadicGetOrderByContinuationTokenMapping.Exception); - } - - PartitionMapper.PartitionMapping partitionMapping = monadicGetOrderByContinuationTokenMapping.Result; - - OrderByContinuationToken targetContinuationToken = partitionMapping.TargetMapping.Values.First(); - - int orderByResumeValueCount = 0; - IReadOnlyList resumeValues; - IReadOnlyList orderByItems; - if (targetContinuationToken.ResumeValues != null) - { - // Use SqlQueryResumeValue for continuation if it is present. - resumeValues = targetContinuationToken.ResumeValues; - orderByItems = null; - orderByResumeValueCount = resumeValues.Count; - } - else - { - // If continuation token has only OrderByItems, check if it can be converted to SqlQueryResumeValue. This will - // help avoid re-writing the query. Conversion will work as long as the order by item type is a supported type. 
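// A minimal sketch of the resume-token sanity check performed below: the number of resume values
// (or order-by items) carried by the continuation token must equal the number of ORDER BY columns
// in the query text, otherwise the token cannot belong to this query and is rejected as malformed.
// The helper and its signature are illustrative only, not SDK API.
using System.Collections.Generic;

internal static class ResumeTokenValidationSketch
{
    public static bool TryValidateResumeValueCount<TResumeValue, TColumn>(
        IReadOnlyList<TResumeValue> resumeValues,
        IReadOnlyList<TColumn> orderByColumns,
        out string error)
    {
        if (resumeValues.Count != orderByColumns.Count)
        {
            error = $"Continuation token carries {resumeValues.Count} resume values, " +
                $"but the query has {orderByColumns.Count} ORDER BY columns.";
            return false;
        }

        error = null;
        return true;
    }
}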
- orderByResumeValueCount = targetContinuationToken.OrderByItems.Count; - - if (ContainsSupportedResumeTypes(targetContinuationToken.OrderByItems)) - { - // Convert the order by items to SqlQueryResumeValue - List generatedResumeValues = new List(targetContinuationToken.OrderByItems.Count); - //foreach (CosmosElement orderByItem in orderByItems) - foreach (OrderByItem orderByItem in targetContinuationToken.OrderByItems) - { - generatedResumeValues.Add(SqlQueryResumeValue.FromOrderByValue(orderByItem.Item)); - } - - resumeValues = generatedResumeValues; - orderByItems = null; - } - else - { - orderByItems = targetContinuationToken.OrderByItems.Select(x => x.Item).ToList(); - resumeValues = null; - } - } - - if (orderByResumeValueCount != orderByColumns.Count) - { - return TryCatch.FromException( - new MalformedContinuationTokenException( - $"Order By Items from continuation token did not match the query text. " + - $"Order by item count: {orderByResumeValueCount} did not match column count {orderByColumns.Count()}. " + - $"Continuation token: {targetContinuationToken}")); - } - - enumeratorsAndTokens = new List<(OrderByQueryPartitionRangePageAsyncEnumerator, OrderByContinuationToken)>(); - if (resumeValues != null) - { - // Continuation contains resume values, so update SqlQuerySpec to include SqlQueryResumeFilter which - // will specify the resume point to the backend. This avoid having to re-write the query. - - // Process partitions left of Target. The resume values in these partition have - // already been processed so exclude flag is set to true. - SqlQuerySpec leftQuerySpec = new SqlQuerySpec( - sqlQuerySpec.QueryText.Replace(oldValue: FormatPlaceHolder, newValue: TrueFilter), - sqlQuerySpec.Parameters, - new SqlQueryResumeFilter(resumeValues, null, true)); - - foreach (KeyValuePair kvp in partitionMapping.MappingLeftOfTarget) - { - FeedRangeEpk range = kvp.Key; - OrderByContinuationToken token = kvp.Value; - OrderByQueryPartitionRangePageAsyncEnumerator remoteEnumerator = new OrderByQueryPartitionRangePageAsyncEnumerator( - documentContainer, - leftQuerySpec, - new FeedRangeState(range, token?.ParallelContinuationToken?.Token != null ? new QueryState(CosmosString.Create(token.ParallelContinuationToken.Token)) : null), - partitionKey, - queryPaginationOptions, - filter: null, - cancellationToken); - - enumeratorsAndTokens.Add((remoteEnumerator, token)); - } - - // Process Target Partitions which is the last partition from which data has been returned. - // For this partition the Rid value needs to be set if present. Exclude flag is not set as the document - // matching the Rid will be skipped in SDK based on SkipCount value. - // Backend requests can contains both SqlQueryResumeFilter and ContinuationToken and the backend will pick - // the resume point that is bigger i.e. most restrictive - foreach (KeyValuePair kvp in partitionMapping.TargetMapping) - { - FeedRangeEpk range = kvp.Key; - OrderByContinuationToken token = kvp.Value; - - SqlQuerySpec targetQuerySpec = new SqlQuerySpec( - sqlQuerySpec.QueryText.Replace(oldValue: FormatPlaceHolder, newValue: TrueFilter), - sqlQuerySpec.Parameters, - new SqlQueryResumeFilter(resumeValues, token?.Rid, false)); - - OrderByQueryPartitionRangePageAsyncEnumerator remoteEnumerator = new OrderByQueryPartitionRangePageAsyncEnumerator( - documentContainer, - targetQuerySpec, - new FeedRangeState(range, token?.ParallelContinuationToken?.Token != null ? 
new QueryState(CosmosString.Create(token.ParallelContinuationToken.Token)) : null), - partitionKey, - queryPaginationOptions, - filter: null, - cancellationToken); - - enumeratorsAndTokens.Add((remoteEnumerator, token)); - } - - // Process partitions right of target. The Resume value in these partitions have not been processed so the exclude value is set to false. - SqlQuerySpec rightQuerySpec = new SqlQuerySpec( - sqlQuerySpec.QueryText.Replace(oldValue: FormatPlaceHolder, newValue: TrueFilter), - sqlQuerySpec.Parameters, - new SqlQueryResumeFilter(resumeValues, null, false)); - - foreach (KeyValuePair kvp in partitionMapping.MappingRightOfTarget) - { - FeedRangeEpk range = kvp.Key; - OrderByContinuationToken token = kvp.Value; - OrderByQueryPartitionRangePageAsyncEnumerator remoteEnumerator = new OrderByQueryPartitionRangePageAsyncEnumerator( - documentContainer, - rightQuerySpec, - new FeedRangeState(range, token?.ParallelContinuationToken?.Token != null ? new QueryState(CosmosString.Create(token.ParallelContinuationToken.Token)) : null), - partitionKey, - queryPaginationOptions, - filter: null, - cancellationToken); - - enumeratorsAndTokens.Add((remoteEnumerator, token)); - } - } - else - { - // If continuation token doesn't have resume values or if order by items cannot be converted to resume values then - // rewrite the query filter to get the correct resume point - ReadOnlyMemory<(OrderByColumn, CosmosElement)> columnAndItems = orderByColumns.Zip(orderByItems, (column, item) => (column, item)).ToArray(); - - // For ascending order-by, left of target partition has filter expression > value, - // right of target partition has filter expression >= value, - // and target partition takes the previous filter from continuation (or true if no continuation) - (string leftFilter, string targetFilter, string rightFilter) = OrderByCrossPartitionQueryPipelineStage.GetFormattedFilters(columnAndItems); - List<(IReadOnlyDictionary, string)> tokenMappingAndFilters = new List<(IReadOnlyDictionary, string)>() - { - { (partitionMapping.MappingLeftOfTarget, leftFilter) }, - { (partitionMapping.TargetMapping, targetFilter) }, - { (partitionMapping.MappingRightOfTarget, rightFilter) }, - }; - - foreach ((IReadOnlyDictionary tokenMapping, string filter) in tokenMappingAndFilters) - { - SqlQuerySpec rewrittenQueryForOrderBy = new SqlQuerySpec( - sqlQuerySpec.QueryText.Replace(oldValue: FormatPlaceHolder, newValue: filter), - sqlQuerySpec.Parameters); - - foreach (KeyValuePair kvp in tokenMapping) - { - FeedRangeEpk range = kvp.Key; - OrderByContinuationToken token = kvp.Value; - OrderByQueryPartitionRangePageAsyncEnumerator remoteEnumerator = new OrderByQueryPartitionRangePageAsyncEnumerator( - documentContainer, - rewrittenQueryForOrderBy, - new FeedRangeState(range, token?.ParallelContinuationToken?.Token != null ? new QueryState(CosmosString.Create(token.ParallelContinuationToken.Token)) : null), - partitionKey, - queryPaginationOptions, - filter, - cancellationToken); - - enumeratorsAndTokens.Add((remoteEnumerator, token)); - } - } - } - } - - OrderByCrossPartitionQueryPipelineStage stage = new OrderByCrossPartitionQueryPipelineStage( - documentContainer, - orderByColumns.Select(column => column.SortOrder).ToList(), - queryPaginationOptions, - maxConcurrency, - enumeratorsAndTokens, - continuationToken == null ? 
null : new QueryState(continuationToken), - cancellationToken); - return TryCatch.FromResult(stage); - } - - private static TryCatch> MonadicGetOrderByContinuationTokenMapping( - IReadOnlyList partitionKeyRanges, - CosmosElement continuationToken, - int numOrderByItems) - { - if (partitionKeyRanges == null) - { - throw new ArgumentOutOfRangeException(nameof(partitionKeyRanges)); - } - - if (numOrderByItems < 0) - { - throw new ArgumentOutOfRangeException(nameof(numOrderByItems)); - } - - if (continuationToken == null) - { - throw new ArgumentNullException(nameof(continuationToken)); - } - - TryCatch> monadicExtractContinuationTokens = MonadicExtractOrderByTokens(continuationToken, numOrderByItems); - if (monadicExtractContinuationTokens.Failed) - { - return TryCatch>.FromException(monadicExtractContinuationTokens.Exception); - } - - return PartitionMapper.MonadicGetPartitionMapping( - partitionKeyRanges, - monadicExtractContinuationTokens.Result); - } - - private static TryCatch> MonadicExtractOrderByTokens( - CosmosElement continuationToken, - int numOrderByColumns) - { - if (continuationToken == null) - { - return TryCatch>.FromResult(default); - } - - if (!(continuationToken is CosmosArray cosmosArray)) - { - return TryCatch>.FromException( - new MalformedContinuationTokenException( - $"Order by continuation token must be an array: {continuationToken}.")); - } - - if (cosmosArray.Count == 0) - { - return TryCatch>.FromException( - new MalformedContinuationTokenException( - $"Order by continuation token cannot be empty: {continuationToken}.")); - } - - List orderByContinuationTokens = new List(); - foreach (CosmosElement arrayItem in cosmosArray) - { - TryCatch tryCreateOrderByContinuationToken = OrderByContinuationToken.TryCreateFromCosmosElement(arrayItem); - if (!tryCreateOrderByContinuationToken.Succeeded) - { - return TryCatch>.FromException(tryCreateOrderByContinuationToken.Exception); - } - - orderByContinuationTokens.Add(tryCreateOrderByContinuationToken.Result); - } - - foreach (OrderByContinuationToken suppliedOrderByContinuationToken in orderByContinuationTokens) - { - int orderByCount = GetOrderByItemCount(suppliedOrderByContinuationToken); - if (orderByCount != numOrderByColumns) - { - return TryCatch>.FromException( - new MalformedContinuationTokenException( - $"Invalid order-by items in continuation token {continuationToken} for OrderBy~Context.")); - } - } - - return TryCatch>.FromResult(orderByContinuationTokens); - } - - private static int GetOrderByItemCount(OrderByContinuationToken orderByContinuationToken) - { - return orderByContinuationToken.ResumeValues != null ? 
- orderByContinuationToken.ResumeValues.Count : orderByContinuationToken.OrderByItems.Count; - } - - private static void AppendToBuilders((StringBuilder leftFilter, StringBuilder targetFilter, StringBuilder rightFilter) builders, object str) - { - OrderByCrossPartitionQueryPipelineStage.AppendToBuilders(builders, str, str, str); - } - - private static void AppendToBuilders((StringBuilder leftFilter, StringBuilder targetFilter, StringBuilder rightFilter) builders, object left, object target, object right) - { - builders.leftFilter.Append(left); - builders.targetFilter.Append(target); - builders.rightFilter.Append(right); - } - - private static (string leftFilter, string targetFilter, string rightFilter) GetFormattedFilters( - ReadOnlyMemory<(OrderByColumn orderByColumn, CosmosElement orderByItem)> columnAndItems) - { - // When we run cross partition queries, - // we only serialize the continuation token for the partition that we left off on. - // The only problem is that when we resume the order by query, - // we don't have continuation tokens for all other partition. - // The saving grace is that the data has a composite sort order(query sort order, partition key range id) - // so we can generate range filters which in turn the backend will turn into rid based continuation tokens, - // which is enough to get the streams of data flowing from all partitions. - // The details of how this is done is described below: - int numOrderByItems = columnAndItems.Length; - bool isSingleOrderBy = numOrderByItems == 1; - StringBuilder left = new StringBuilder(); - StringBuilder target = new StringBuilder(); - StringBuilder right = new StringBuilder(); - - (StringBuilder, StringBuilder, StringBuilder) builders = (left, target, right); - - if (isSingleOrderBy) - { - //For a single order by query we resume the continuations in this manner - // Suppose the query is SELECT* FROM c ORDER BY c.string ASC - // And we left off on partition N with the value "B" - // Then - // All the partitions to the left will have finished reading "B" - // Partition N is still reading "B" - // All the partitions to the right have let to read a "B - // Therefore the filters should be - // > "B" , >= "B", and >= "B" respectively - // Repeat the same logic for DESC and you will get - // < "B", <= "B", and <= "B" respectively - // The general rule becomes - // For ASC - // > for partitions to the left - // >= for the partition we left off on - // >= for the partitions to the right - // For DESC - // < for partitions to the left - // <= for the partition we left off on - // <= for the partitions to the right - (OrderByColumn orderByColumn, CosmosElement orderByItem) = columnAndItems.Span[0]; - (string expression, SortOrder sortOrder) = (orderByColumn.Expression, orderByColumn.SortOrder); - - AppendToBuilders(builders, "( "); - - // We need to add the filter for within the same type. - if (orderByItem is not CosmosUndefined) - { - StringBuilder sb = new StringBuilder(); - CosmosElementToQueryLiteral cosmosElementToQueryLiteral = new CosmosElementToQueryLiteral(sb); - orderByItem.Accept(cosmosElementToQueryLiteral); - - string orderByItemToString = sb.ToString(); - - left.Append($"{expression} {(sortOrder == SortOrder.Descending ? Expressions.LessThan : Expressions.GreaterThan)} {orderByItemToString}"); - target.Append($"{expression} {(sortOrder == SortOrder.Descending ? Expressions.LessThanOrEqualTo : Expressions.GreaterThanOrEqualTo)} {orderByItemToString}"); - right.Append($"{expression} {(sortOrder == SortOrder.Descending ? 
Expressions.LessThanOrEqualTo : Expressions.GreaterThanOrEqualTo)} {orderByItemToString}"); - } - else - { - // User is ordering by undefined, so we need to avoid a null reference exception. - - // What we really want is to support expression > undefined, - // but the engine evaluates to undefined instead of true or false, - // so we work around this by using the IS_DEFINED() system function. - - ComparisionWithUndefinedFilters filters = new ComparisionWithUndefinedFilters(expression); - left.Append($"{(sortOrder == SortOrder.Descending ? filters.LessThan : filters.GreaterThan)}"); - target.Append($"{(sortOrder == SortOrder.Descending ? filters.LessThanOrEqualTo : filters.GreaterThanOrEqualTo)}"); - right.Append($"{(sortOrder == SortOrder.Descending ? filters.LessThanOrEqualTo : filters.GreaterThanOrEqualTo)}"); - } - - // Now we need to include all the types that match the sort order. - ReadOnlyMemory isDefinedFunctions = orderByItem.Accept(CosmosElementToIsSystemFunctionsVisitor.Singleton, sortOrder == SortOrder.Ascending); - foreach (string isDefinedFunction in isDefinedFunctions.Span) - { - AppendToBuilders(builders, " OR "); - AppendToBuilders(builders, $"{isDefinedFunction}({expression})"); - } - - AppendToBuilders(builders, " )"); - } - else - { - //For a multi order by query - // Suppose the query is SELECT* FROM c ORDER BY c.string ASC, c.number ASC - // And we left off on partition N with the value("A", 1) - // Then - // All the partitions to the left will have finished reading("A", 1) - // Partition N is still reading("A", 1) - // All the partitions to the right have let to read a "(A", 1) - // The filters are harder to derive since their are multiple columns - // But the problem reduces to "How do you know one document comes after another in a multi order by query" - // The answer is to just look at it one column at a time. - // For this particular scenario: - // If a first column is greater ex. ("B", blah), then the document comes later in the sort order - // Therefore we want all documents where the first column is greater than "A" which means > "A" - // Or if the first column is a tie, then you look at the second column ex. ("A", blah). - // Therefore we also want all documents where the first column was a tie but the second column is greater which means = "A" AND > 1 - // Therefore the filters should be - // (> "A") OR (= "A" AND > 1), (> "A") OR (= "A" AND >= 1), (> "A") OR (= "A" AND >= 1) - // Notice that if we repeated the same logic we for single order by we would have gotten - // > "A" AND > 1, >= "A" AND >= 1, >= "A" AND >= 1 - // which is wrong since we missed some documents - // Repeat the same logic for ASC, DESC - // (> "A") OR (= "A" AND < 1), (> "A") OR (= "A" AND <= 1), (> "A") OR (= "A" AND <= 1) - // Again for DESC, ASC - // (< "A") OR (= "A" AND > 1), (< "A") OR (= "A" AND >= 1), (< "A") OR (= "A" AND >= 1) - // And again for DESC DESC - // (< "A") OR (= "A" AND < 1), (< "A") OR (= "A" AND <= 1), (< "A") OR (= "A" AND <= 1) - // The general we look at all prefixes of the order by columns to look for tie breakers. 
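// A minimal sketch of the resume filters derived in the explanation above for a two-column
// ascending ORDER BY (for example ORDER BY c.a ASC, c.b ASC resumed at ("A", 1)): partitions to
// the left of the resume point use a strict comparison on the tie-breaking column, while the
// target partition and the partitions to its right use >=. The literals are assumed to be
// already-escaped query fragments; this is not the SDK's query rewriting code.
internal static class OrderByResumeFilterSketch
{
    public static (string left, string target, string right) TwoColumnAscendingFilters(
        string firstExpression,
        string firstLiteral,
        string secondExpression,
        string secondLiteral)
    {
        string left = $"({firstExpression} > {firstLiteral}) OR ({firstExpression} = {firstLiteral} AND {secondExpression} > {secondLiteral})";
        string targetAndRight = $"({firstExpression} > {firstLiteral}) OR ({firstExpression} = {firstLiteral} AND {secondExpression} >= {secondLiteral})";
        return (left, targetAndRight, targetAndRight);
    }
}
// TwoColumnAscendingFilters("c.a", "\"A\"", "c.b", "1") produces
//   left:         (c.a > "A") OR (c.a = "A" AND c.b > 1)
//   target/right: (c.a > "A") OR (c.a = "A" AND c.b >= 1)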
- // Except for the full prefix whose last column follows the rules for single item order by - // And then you just OR all the possibilities together - for (int prefixLength = 1; prefixLength <= numOrderByItems; prefixLength++) - { - ReadOnlySpan<(OrderByColumn orderByColumn, CosmosElement orderByItem)> columnAndItemPrefix = columnAndItems.Span.Slice(start: 0, length: prefixLength); - - bool lastPrefix = prefixLength == numOrderByItems; - - OrderByCrossPartitionQueryPipelineStage.AppendToBuilders(builders, "("); - - for (int index = 0; index < prefixLength; index++) - { - string expression = columnAndItemPrefix[index].orderByColumn.Expression; - SortOrder sortOrder = columnAndItemPrefix[index].orderByColumn.SortOrder; - CosmosElement orderByItem = columnAndItemPrefix[index].orderByItem; - bool lastItem = index == prefixLength - 1; - - OrderByCrossPartitionQueryPipelineStage.AppendToBuilders(builders, "("); - - bool wasInequality; - // We need to add the filter for within the same type. - if (orderByItem is CosmosUndefined) - { - ComparisionWithUndefinedFilters filters = new ComparisionWithUndefinedFilters(expression); - - // Refer to the logic from single order by for how we are handling order by undefined - if (lastItem) - { - if (lastPrefix) - { - if (sortOrder == SortOrder.Descending) - { - // <, <=, <= - OrderByCrossPartitionQueryPipelineStage.AppendToBuilders(builders, filters.LessThan, filters.LessThanOrEqualTo, filters.LessThanOrEqualTo); - } - else - { - // >, >=, >= - OrderByCrossPartitionQueryPipelineStage.AppendToBuilders(builders, filters.GreaterThan, filters.GreaterThanOrEqualTo, filters.GreaterThanOrEqualTo); - } - } - else - { - if (sortOrder == SortOrder.Descending) - { - // <, <, < - OrderByCrossPartitionQueryPipelineStage.AppendToBuilders(builders, filters.LessThan, filters.LessThan, filters.LessThan); - } - else - { - // >, >, > - OrderByCrossPartitionQueryPipelineStage.AppendToBuilders(builders, filters.GreaterThan, filters.GreaterThan, filters.GreaterThan); - } - } - - wasInequality = true; - } - else - { - // =, =, = - OrderByCrossPartitionQueryPipelineStage.AppendToBuilders(builders, filters.EqualTo); - wasInequality = false; - } - } - else - { - // Append Expression - OrderByCrossPartitionQueryPipelineStage.AppendToBuilders(builders, expression); - OrderByCrossPartitionQueryPipelineStage.AppendToBuilders(builders, " "); - - // Append Binary Operator - if (lastItem) - { - string inequality = sortOrder == SortOrder.Descending ? Expressions.LessThan : Expressions.GreaterThan; - OrderByCrossPartitionQueryPipelineStage.AppendToBuilders(builders, inequality); - if (lastPrefix) - { - OrderByCrossPartitionQueryPipelineStage.AppendToBuilders(builders, string.Empty, Expressions.EqualTo, Expressions.EqualTo); - } - - wasInequality = true; - } - else - { - OrderByCrossPartitionQueryPipelineStage.AppendToBuilders(builders, Expressions.EqualTo); - wasInequality = false; - } - - // Append OrderBy Item - StringBuilder sb = new StringBuilder(); - CosmosElementToQueryLiteral cosmosElementToQueryLiteral = new CosmosElementToQueryLiteral(sb); - orderByItem.Accept(cosmosElementToQueryLiteral); - string orderByItemToString = sb.ToString(); - OrderByCrossPartitionQueryPipelineStage.AppendToBuilders(builders, " "); - OrderByCrossPartitionQueryPipelineStage.AppendToBuilders(builders, orderByItemToString); - OrderByCrossPartitionQueryPipelineStage.AppendToBuilders(builders, " "); - } - - if (wasInequality) - { - // Now we need to include all the types that match the sort order. 
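// A minimal sketch of the cross-type checks appended next: Cosmos orders mixed types as
// undefined < null < boolean < number < string < array < object, so an ascending resume at, say,
// a number must also admit every later type by OR-ing in IS_STRING/IS_ARRAY/IS_OBJECT checks
// (and the mirror image for descending). This restates the visitor defined later in this file
// with plain arrays; it is not the SDK implementation.
using System;
using System.Collections.Generic;

internal static class CrossTypeFilterSketch
{
    private static readonly string[] TypeChecksInSortOrder =
    {
        "NOT IS_DEFINED", "IS_NULL", "IS_BOOLEAN", "IS_NUMBER", "IS_STRING", "IS_ARRAY", "IS_OBJECT",
    };

    public static IEnumerable<string> TypeChecksToAppend(int typeIndex, bool ascending)
    {
        return ascending
            ? new ArraySegment<string>(TypeChecksInSortOrder, typeIndex + 1, TypeChecksInSortOrder.Length - typeIndex - 1)
            : new ArraySegment<string>(TypeChecksInSortOrder, 0, typeIndex);
    }
}
// TypeChecksToAppend(typeIndex: 3 /* number */, ascending: true) yields IS_STRING, IS_ARRAY, IS_OBJECT,
// each applied to the ORDER BY expression as "... OR IS_STRING(c.field) OR IS_ARRAY(c.field) OR IS_OBJECT(c.field)".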
- ReadOnlyMemory isDefinedFunctions = orderByItem.Accept(CosmosElementToIsSystemFunctionsVisitor.Singleton, sortOrder == SortOrder.Ascending); - foreach (string isDefinedFunction in isDefinedFunctions.Span) - { - AppendToBuilders(builders, " OR "); - AppendToBuilders(builders, $"{isDefinedFunction}({expression}) "); - } - } - - OrderByCrossPartitionQueryPipelineStage.AppendToBuilders(builders, ")"); - - if (!lastItem) - { - OrderByCrossPartitionQueryPipelineStage.AppendToBuilders(builders, " AND "); - } - } - - OrderByCrossPartitionQueryPipelineStage.AppendToBuilders(builders, ")"); - if (!lastPrefix) - { - OrderByCrossPartitionQueryPipelineStage.AppendToBuilders(builders, " OR "); - } - } - } - - return (left.ToString(), target.ToString(), right.ToString()); - } - - private static OrderByContinuationToken CreateOrderByContinuationToken( - ParallelContinuationToken parallelToken, - OrderByQueryResult orderByQueryResult, - int skipCount, - string filter) - { - OrderByContinuationToken token; - // If order by items have c* types then it cannot be converted to resume values - if (ContainsSupportedResumeTypes(orderByQueryResult.OrderByItems)) - { - List resumeValues = new List(orderByQueryResult.OrderByItems.Count); - foreach (OrderByItem orderByItem in orderByQueryResult.OrderByItems) - { - resumeValues.Add(SqlQueryResumeValue.FromOrderByValue(orderByItem.Item)); - } - - token = new OrderByContinuationToken( - parallelToken, - orderByItems: null, - resumeValues, - orderByQueryResult.Rid, - skipCount: skipCount, - filter: filter); - } - else - { - token = new OrderByContinuationToken( - parallelToken, - orderByQueryResult.OrderByItems, - resumeValues: null, - orderByQueryResult.Rid, - skipCount: skipCount, - filter: filter); - } - - return token; - } - - // Helper method to check that resume values are of type that is supported by SqlQueryResumeValue - private static bool ContainsSupportedResumeTypes(IReadOnlyList orderByItems) - { - foreach (OrderByItem orderByItem in orderByItems) - { - if (!orderByItem.Item.Accept(SupportedResumeTypeVisitor.Singleton)) - { - return false; - } - } - - return true; - } - - private static async Task monadicQueryByPage)>> FilterNextAsync( - OrderByQueryPartitionRangePageAsyncEnumerator enumerator, - IReadOnlyList sortOrders, - OrderByContinuationToken continuationToken, - ITrace trace, - CancellationToken cancellationToken) - { - cancellationToken.ThrowIfCancellationRequested(); - // When we resume a query on a partition there is a possibility that we only read a partial page from the backend - // meaning that will we repeat some documents if we didn't do anything about it. - // The solution is to filter all the documents that come before in the sort order, since we have already emitted them to the client. - // The key is to seek until we get an order by value that matches the order by value we left off on. - // Once we do that we need to seek to the correct _rid within the term, - // since there might be many documents with the same order by value we left off on. - // Finally we need to skip some duplicate _rids, since JOINs emit multiples documents with the same rid and we read a partial page. 
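// A minimal sketch of the resume-time skip just described, treated as a seek on the composite key
// (order-by values, rid, skip count): documents that compare below the continuation point were
// already emitted and are dropped; a tie on the order-by values falls through to the rid
// comparison, and a tie on the rid consumes the remaining skip count (duplicate rids come from
// JOINs). Generic delegates and an ordinal rid comparison stand in for the SDK's types and its
// index-direction-aware rid ordering.
using System;
using System.Collections.Generic;

internal static class ResumeSkipSketch
{
    public static IEnumerable<TDocument> SkipAlreadyEmitted<TDocument, TKey>(
        IEnumerable<TDocument> pageInSortOrder,
        Func<TDocument, TKey> orderByKey,
        IComparer<TKey> keyComparer,
        TKey resumeKey,
        Func<TDocument, string> rid,
        string resumeRid,
        int skipCount)
    {
        foreach (TDocument document in pageInSortOrder)
        {
            int keyComparison = keyComparer.Compare(orderByKey(document), resumeKey);
            if (keyComparison < 0)
            {
                continue; // Sorts before the continuation point, so it was already emitted.
            }

            if (keyComparison == 0)
            {
                int ridComparison = string.CompareOrdinal(rid(document), resumeRid);
                if (ridComparison < 0)
                {
                    continue; // Same order-by values but an earlier rid: already emitted.
                }

                if (ridComparison == 0 && skipCount-- > 0)
                {
                    continue; // Same rid seen before the page was cut (JOIN duplicate).
                }
            }

            yield return document;
        }
    }
}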
- // You can also think about this as a seek on a composite index where the columns are [sort_order, rid, skip_count] - - int itemsToSkip = continuationToken.SkipCount; - if (!ResourceId.TryParse(continuationToken.Rid, out ResourceId continuationRid)) - { - return TryCatch<(bool, int, TryCatch)>.FromException( - new MalformedContinuationTokenException( - $"Invalid Rid in the continuation token {continuationToken.ParallelContinuationToken.Token} for OrderBy~Context.")); - } - - if (!await enumerator.MoveNextAsync(trace)) - { - return TryCatch<(bool, int, TryCatch)>.FromResult((true, 0, enumerator.Current)); - } - - TryCatch monadicOrderByQueryPage = enumerator.Current; - if (monadicOrderByQueryPage.Failed) - { - return TryCatch<(bool, int, TryCatch)>.FromException(monadicOrderByQueryPage.Exception); - } - - OrderByQueryPage orderByQueryPage = monadicOrderByQueryPage.Result; - IEnumerator documents = orderByQueryPage.Enumerator; - - while (documents.MoveNext()) - { - int sortOrderCompare = 0; - // Filter out documents until we find something that matches the sort order. - OrderByQueryResult orderByResult = new OrderByQueryResult(documents.Current); - for (int i = 0; (i < sortOrders.Count) && (sortOrderCompare == 0); ++i) - { - sortOrderCompare = continuationToken.ResumeValues != null - ? continuationToken.ResumeValues[i].CompareTo(orderByResult.OrderByItems[i].Item) - : ItemComparer.Instance.Compare( - continuationToken.OrderByItems[i].Item, - orderByResult.OrderByItems[i].Item); - - if (sortOrderCompare != 0) - { - sortOrderCompare = sortOrders[i] == SortOrder.Ascending ? sortOrderCompare : -sortOrderCompare; - } - } - - if (sortOrderCompare < 0) - { - // We might have passed the item due to deletions and filters. - return TryCatch<(bool, int, TryCatch)>.FromResult((true, 0, enumerator.Current)); - } - - if (sortOrderCompare > 0) - { - // This document does not match the sort order, so skip it. - continue; - } - - // Once the item matches the order by items from the continuation tokens - // We still need to remove all the documents that have a lower or same rid in the rid sort order. - // If there is a tie in the sort order the documents should be in _rid order in the same direction as the index (given by the backend) - ResourceId rid = ResourceId.Parse(orderByResult.Rid); - int ridOrderCompare = continuationRid.Document.CompareTo(rid.Document); - - Lazy cosmosQueryExecutionInfo = orderByQueryPage.Page.CosmosQueryExecutionInfo; - if ((cosmosQueryExecutionInfo == null) || cosmosQueryExecutionInfo.Value.ReverseRidEnabled) - { - // If reverse rid is enabled on the backend then fallback to the old way of doing it. - if (sortOrders[0] == SortOrder.Descending) - { - ridOrderCompare = -ridOrderCompare; - } - } - else - { - // Go by the whatever order the index wants - if (cosmosQueryExecutionInfo.Value.ReverseIndexScan) - { - ridOrderCompare = -ridOrderCompare; - } - } - - if (ridOrderCompare < 0) - { - // We might have passed the rid due to deletions and filters. - return TryCatch<(bool, int, TryCatch)>.FromResult((true, 0, enumerator.Current)); - } - - if (ridOrderCompare > 0) - { - // This document does not match the rid order, so skip it. 
- continue; - } - - // At this point we need to skip due to joins - if (--itemsToSkip < 0) - { - return TryCatch<(bool, int, TryCatch)>.FromResult((true, 0, enumerator.Current)); - } - } - - // If we made it here it means we failed to find the resume order by item which is possible - // if the user added documents inbetween continuations, so we need to yield and filter the next page of results also. - return TryCatch<(bool, int, TryCatch)>.FromResult((false, itemsToSkip, enumerator.Current)); - } - - private static bool IsSplitException(Exception exception) - { - while (exception.InnerException != null) - { - exception = exception.InnerException; - } - - return exception.IsPartitionSplitException(); - } - - public void SetCancellationToken(CancellationToken cancellationToken) - { - this.cancellationToken = cancellationToken; - foreach (OrderByQueryPartitionRangePageAsyncEnumerator enumerator in this.enumerators) - { - enumerator.SetCancellationToken(cancellationToken); - } - - foreach ((OrderByQueryPartitionRangePageAsyncEnumerator, OrderByContinuationToken) enumeratorAndToken in this.uninitializedEnumeratorsAndTokens) - { - enumeratorAndToken.Item1.SetCancellationToken(cancellationToken); - } - } - - private sealed class CosmosElementToIsSystemFunctionsVisitor : ICosmosElementVisitor> - { - public static readonly CosmosElementToIsSystemFunctionsVisitor Singleton = new CosmosElementToIsSystemFunctionsVisitor(); - - private static class IsSystemFunctions - { - public const string Defined = "IS_DEFINED"; - public const string Undefined = "NOT IS_DEFINED"; - public const string Null = "IS_NULL"; - public const string Boolean = "IS_BOOLEAN"; - public const string Number = "IS_NUMBER"; - public const string String = "IS_STRING"; - public const string Array = "IS_ARRAY"; - public const string Object = "IS_OBJECT"; - } - - private static readonly ReadOnlyMemory SystemFunctionSortOrder = new string[] - { - IsSystemFunctions.Undefined, - IsSystemFunctions.Null, - IsSystemFunctions.Boolean, - IsSystemFunctions.Number, - IsSystemFunctions.String, - IsSystemFunctions.Array, - IsSystemFunctions.Object, - }; - - private static readonly ReadOnlyMemory ExtendedTypesSystemFunctionSortOrder = new string[] - { - IsSystemFunctions.Undefined, - IsSystemFunctions.Defined - }; - - private static class SortOrder - { - public const int Undefined = 0; - public const int Null = 1; - public const int Boolean = 2; - public const int Number = 3; - public const int String = 4; - public const int Array = 5; - public const int Object = 6; - } - - private static class ExtendedTypesSortOrder - { - public const int Undefined = 0; - public const int Defined = 1; - } - - private CosmosElementToIsSystemFunctionsVisitor() - { - } - - public ReadOnlyMemory Visit(CosmosArray cosmosArray, bool isAscending) - { - return GetIsDefinedFunctions(SortOrder.Array, isAscending); - } - - public ReadOnlyMemory Visit(CosmosBinary cosmosBinary, bool isAscending) - { - return GetExtendedTypesIsDefinedFunctions(ExtendedTypesSortOrder.Defined, isAscending); - } - - public ReadOnlyMemory Visit(CosmosBoolean cosmosBoolean, bool isAscending) - { - return GetIsDefinedFunctions(SortOrder.Boolean, isAscending); - } - - public ReadOnlyMemory Visit(CosmosGuid cosmosGuid, bool isAscending) - { - return GetExtendedTypesIsDefinedFunctions(ExtendedTypesSortOrder.Defined, isAscending); - } - - public ReadOnlyMemory Visit(CosmosNull cosmosNull, bool isAscending) - { - return GetIsDefinedFunctions(SortOrder.Null, isAscending); - } - - public ReadOnlyMemory 
Visit(CosmosUndefined cosmosUndefined, bool isAscending) - { - return isAscending ? SystemFunctionSortOrder.Slice(start: 1) : ReadOnlyMemory.Empty; - } - - public ReadOnlyMemory Visit(CosmosNumber cosmosNumber, bool isAscending) - { - return GetIsDefinedFunctions(SortOrder.Number, isAscending); - } - - public ReadOnlyMemory Visit(CosmosObject cosmosObject, bool isAscending) - { - return GetIsDefinedFunctions(SortOrder.Object, isAscending); - } - - public ReadOnlyMemory Visit(CosmosString cosmosString, bool isAscending) - { - return GetIsDefinedFunctions(SortOrder.String, isAscending); - } - - private static ReadOnlyMemory GetIsDefinedFunctions(int index, bool isAscending) - { - return isAscending ? SystemFunctionSortOrder.Slice(index + 1) : SystemFunctionSortOrder.Slice(start: 0, index); - } - - private static ReadOnlyMemory GetExtendedTypesIsDefinedFunctions(int index, bool isAscending) - { - return isAscending ? - ExtendedTypesSystemFunctionSortOrder.Slice(index + 1) : - ExtendedTypesSystemFunctionSortOrder.Slice(start: 0, index); - } - } - - private readonly struct ComparisionWithUndefinedFilters - { - public ComparisionWithUndefinedFilters( - string expression) - { - this.LessThan = "false"; - this.LessThanOrEqualTo = $"NOT IS_DEFINED({expression})"; - this.EqualTo = $"NOT IS_DEFINED({expression})"; - this.GreaterThan = $"IS_DEFINED({expression})"; - this.GreaterThanOrEqualTo = "true"; - } - - public string LessThan { get; } - public string LessThanOrEqualTo { get; } - public string EqualTo { get; } - public string GreaterThan { get; } - public string GreaterThanOrEqualTo { get; } - } - - private sealed class SupportedResumeTypeVisitor : ICosmosElementVisitor - { - public static readonly SupportedResumeTypeVisitor Singleton = new SupportedResumeTypeVisitor(); - - private SupportedResumeTypeVisitor() - { - } - - public bool Visit(CosmosArray cosmosArray) - { - return true; - } - - public bool Visit(CosmosBinary cosmosBinary) - { - return false; - } - - public bool Visit(CosmosBoolean cosmosBoolean) - { - return true; - } - - public bool Visit(CosmosGuid cosmosGuid) - { - return false; - } - - public bool Visit(CosmosNull cosmosNull) - { - return true; - } - - public bool Visit(CosmosNumber cosmosNumber) - { - return cosmosNumber.Accept(SqlQueryResumeValue.SupportedResumeNumberTypeVisitor.Singleton); - } - - public bool Visit(CosmosObject cosmosObject) - { - return true; - } - - public bool Visit(CosmosString cosmosString) - { - return true; - } - - public bool Visit(CosmosUndefined cosmosUndefined) - { - return true; - } - } - } -} +// ------------------------------------------------------------ +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// ------------------------------------------------------------ + +namespace Microsoft.Azure.Cosmos.Query.Core.Pipeline.CrossPartition.OrderBy +{ + using System; + using System.Collections.Generic; + using System.Linq; + using System.Text; + using System.Threading; + using System.Threading.Tasks; + using Microsoft.Azure.Cosmos.CosmosElements; + using Microsoft.Azure.Cosmos.Pagination; + using Microsoft.Azure.Cosmos.Query.Core.Collections; + using Microsoft.Azure.Cosmos.Query.Core.Exceptions; + using Microsoft.Azure.Cosmos.Query.Core.Monads; + using Microsoft.Azure.Cosmos.Query.Core.Pipeline.CrossPartition.Parallel; + using Microsoft.Azure.Cosmos.Query.Core.Pipeline.Pagination; + using Microsoft.Azure.Cosmos.Query.Core.QueryClient; + using Microsoft.Azure.Cosmos.Tracing; + using ResourceId = Documents.ResourceId; + + internal sealed class OrderByCrossPartitionQueryPipelineStage : IQueryPipelineStage + { + /// + /// Order by queries are rewritten to allow us to inject a filter. + /// This placeholder is so that we can just string replace it with the filter we want without having to understand the structure of the query. + /// + private const string FormatPlaceHolder = "{documentdb-formattableorderbyquery-filter}"; + + /// + /// If query does not need a filter then we replace the FormatPlaceHolder with "true", since + /// "SELECT * FROM c WHERE blah and true" is the same as "SELECT * FROM c where blah" + /// + private const string TrueFilter = "true"; + + private static readonly QueryState InitializingQueryState = new QueryState(CosmosString.Create("ORDER BY NOT INITIALIZED YET!")); + + private static readonly IReadOnlyList EmptyPage = new List(); + + private sealed class InitializationParameters + { + public IDocumentContainer DocumentContainer { get; } + + public SqlQuerySpec SqlQuerySpec { get; } + + public IReadOnlyList TargetRanges { get; } + + public Cosmos.PartitionKey? PartitionKey { get; } + + public IReadOnlyList OrderByColumns { get; } + + public QueryPaginationOptions QueryPaginationOptions { get; } + + public int MaxConcurrency { get; } + + public InitializationParameters( + IDocumentContainer documentContainer, + SqlQuerySpec sqlQuerySpec, + IReadOnlyList targetRanges, + PartitionKey? partitionKey, + IReadOnlyList orderByColumns, + QueryPaginationOptions queryPaginationOptions, + int maxConcurrency) + { + this.DocumentContainer = documentContainer ?? throw new ArgumentNullException(nameof(documentContainer)); + this.SqlQuerySpec = sqlQuerySpec ?? throw new ArgumentNullException(nameof(sqlQuerySpec)); + this.TargetRanges = targetRanges ?? throw new ArgumentNullException(nameof(targetRanges)); + this.PartitionKey = partitionKey; + this.OrderByColumns = orderByColumns ?? throw new ArgumentNullException(nameof(orderByColumns)); + this.QueryPaginationOptions = queryPaginationOptions ?? throw new ArgumentNullException(nameof(queryPaginationOptions)); + this.MaxConcurrency = maxConcurrency; + } + } + + private sealed class QueryPageParameters + { + public string ActivityId { get; } + + public Lazy CosmosQueryExecutionInfo { get; } + + public DistributionPlanSpec DistributionPlanSpec { get; } + + public IReadOnlyDictionary AdditionalHeaders { get; } + + public QueryPageParameters( + string activityId, + Lazy cosmosQueryExecutionInfo, + DistributionPlanSpec distributionPlanSpec, + IReadOnlyDictionary additionalHeaders) + { + this.ActivityId = activityId ?? 
throw new ArgumentNullException(nameof(activityId)); + this.CosmosQueryExecutionInfo = cosmosQueryExecutionInfo; + this.DistributionPlanSpec = distributionPlanSpec; + this.AdditionalHeaders = additionalHeaders; + } + } + + private enum ExecutionState + { + Uninitialized, + Initialized + } + + private readonly InitializationParameters initializationParameters; + + private ExecutionState state; + + private Queue bufferedPages; + + private TryCatch inner; + + public TryCatch Current => this.GetCurrentPage(); + + private OrderByCrossPartitionQueryPipelineStage(InitializationParameters initializationParameters) + { + this.initializationParameters = initializationParameters ?? throw new ArgumentNullException(nameof(initializationParameters)); + this.state = ExecutionState.Uninitialized; + this.bufferedPages = new Queue(); + } + + private TryCatch GetCurrentPage() + { + if (this.state == ExecutionState.Uninitialized) + { + throw new InvalidOperationException("MoveNextAsync must be called before accessing the Current property."); + } + + if (this.bufferedPages.Count != 0) + { + return TryCatch.FromResult(this.bufferedPages.Peek()); + } + + return this.inner.Try(pipelineStage => pipelineStage.Current); + } + + public async ValueTask MoveNextAsync(ITrace trace, CancellationToken cancellationToken) + { + if (this.state == ExecutionState.Uninitialized) + { + // Note: when we set the state to initialized here, we no longer allowing a retry for these failures + // To allow retries, we must not set the state to initialized until construction of the inner pipeline succeeds + (this.inner, this.bufferedPages) = await MoveNextAsync_InitializeAsync(this.initializationParameters, trace, cancellationToken); + this.state = ExecutionState.Initialized; + + if (this.bufferedPages.Count > 0) + { + return true; + } + } + + if (this.bufferedPages.Count > 0) + { + this.bufferedPages.Dequeue(); + if (this.bufferedPages.Count > 0) + { + return true; + } + } + + TryCatch hasNext = await this.inner.TryAsync(pipelineStage => pipelineStage.MoveNextAsync(trace, cancellationToken)); + return hasNext.Succeeded && hasNext.Result; + } + + public ValueTask DisposeAsync() + { + if (this.state == ExecutionState.Initialized && this.inner.Succeeded) + { + return this.inner.Result.DisposeAsync(); + } + + return default; + } + + public static TryCatch MonadicCreate( + IDocumentContainer documentContainer, + SqlQuerySpec sqlQuerySpec, + IReadOnlyList targetRanges, + Cosmos.PartitionKey? 
partitionKey, + IReadOnlyList orderByColumns, + QueryPaginationOptions queryPaginationOptions, + int maxConcurrency, + CosmosElement continuationToken) + { + if (documentContainer == null) + { + throw new ArgumentNullException(nameof(documentContainer)); + } + + if (sqlQuerySpec == null) + { + throw new ArgumentNullException(nameof(sqlQuerySpec)); + } + + if (targetRanges == null) + { + throw new ArgumentNullException(nameof(targetRanges)); + } + + if (targetRanges.Count == 0) + { + throw new ArgumentException($"{nameof(targetRanges)} must not be empty."); + } + + if (orderByColumns == null) + { + throw new ArgumentNullException(nameof(orderByColumns)); + } + + if (orderByColumns.Count == 0) + { + throw new ArgumentException($"{nameof(orderByColumns)} must not be empty."); + } + + if (continuationToken != null) + { + return StreamingOrderByCrossPartitionQueryPipelineStage.MonadicCreate( + documentContainer, + sqlQuerySpec, + targetRanges, + partitionKey, + orderByColumns, + queryPaginationOptions, + maxConcurrency, + continuationToken); + } + + InitializationParameters init = new InitializationParameters( + documentContainer, + sqlQuerySpec, + targetRanges, + partitionKey, + orderByColumns, + queryPaginationOptions, + maxConcurrency); + + return TryCatch.FromResult(new OrderByCrossPartitionQueryPipelineStage(init)); + } + + private static async ValueTask<(TryCatch, Queue)> MoveNextAsync_InitializeAsync(InitializationParameters init, ITrace trace, CancellationToken cancellationToken) + { + SqlQuerySpec rewrittenQueryForOrderBy = new SqlQuerySpec( + init.SqlQuerySpec.QueryText.Replace(oldValue: FormatPlaceHolder, newValue: TrueFilter), + init.SqlQuerySpec.Parameters); + + List uninitializedEnumerators = init.TargetRanges + .Select(range => OrderByQueryPartitionRangePageAsyncEnumerator.Create( + init.DocumentContainer, + rewrittenQueryForOrderBy, + new FeedRangeState(range, state: default), + init.PartitionKey, + init.QueryPaginationOptions, + TrueFilter, + PrefetchPolicy.PrefetchSinglePage)) + .ToList(); + + Queue<(OrderByQueryPartitionRangePageAsyncEnumerator enumerator, OrderByContinuationToken token)> uninitializedEnumeratorsAndTokens = new Queue<(OrderByQueryPartitionRangePageAsyncEnumerator enumerator, OrderByContinuationToken token)>( + uninitializedEnumerators + .Select(x => (x, (OrderByContinuationToken)null))); + + await ParallelPrefetch.PrefetchInParallelAsync(uninitializedEnumerators, init.MaxConcurrency, trace, cancellationToken); + + IReadOnlyList sortOrders = init.OrderByColumns.Select(column => column.SortOrder).ToList(); + PriorityQueue initializedEnumerators = new PriorityQueue(new OrderByEnumeratorComparer(sortOrders)); + Queue<(OrderByQueryPartitionRangePageAsyncEnumerator enumerator, OrderByContinuationToken token)> enumeratorsAndTokens = new Queue<(OrderByQueryPartitionRangePageAsyncEnumerator enumerator, OrderByContinuationToken token)>(); + + bool nonStreaming = false; + Queue bufferedPages = new Queue(); + QueryPageParameters queryPageParameters = null; + while (uninitializedEnumeratorsAndTokens.Count != 0) + { + (OrderByQueryPartitionRangePageAsyncEnumerator enumerator, OrderByContinuationToken token) = uninitializedEnumeratorsAndTokens.Dequeue(); + if (await enumerator.MoveNextAsync(trace, cancellationToken)) + { + if (enumerator.Current.Failed) + { + if (IsSplitException(enumerator.Current.Exception)) + { + await MoveNextAsync_InitializeAsync_HandleSplitAsync( + init.DocumentContainer, + uninitializedEnumeratorsAndTokens, + enumerator, + token, + trace, + 
cancellationToken); + + continue; + } + else + { + // early return + return (TryCatch.FromException(enumerator.Current.Exception), bufferedPages); + } + } + + QueryPage page = enumerator.Current.Result.Page; + if (queryPageParameters == null) + { + // It is difficult to merge the headers because the type is not strong enough to support merging. + // Moreover, the existing code also does not merge the headers. + // Instead they grab the headers at random from some pages and send them onwards. + queryPageParameters = new QueryPageParameters( + activityId: page.ActivityId, + cosmosQueryExecutionInfo: page.CosmosQueryExecutionInfo, + distributionPlanSpec: page.DistributionPlanSpec, + additionalHeaders: page.AdditionalHeaders); + } + + // For backwards compatibility the default value of streaming for ORDER BY is _true_ + nonStreaming = nonStreaming || (!page.Streaming.GetValueOrDefault(true) && (page.State != null)); + + if (enumerator.Current.Result.Enumerator.MoveNext()) + { + // the page is non-empty then we need to enqueue the enumerator in the PriorityQueue + initializedEnumerators.Enqueue(enumerator); + } + else + { + enumeratorsAndTokens.Enqueue((enumerator, token)); + } + + // Ensure proper reporting of query charges + bufferedPages.Enqueue(new QueryPage( + documents: EmptyPage, + requestCharge: page.RequestCharge, + activityId: page.ActivityId, + cosmosQueryExecutionInfo: page.CosmosQueryExecutionInfo, + distributionPlanSpec: page.DistributionPlanSpec, + disallowContinuationTokenMessage: page.DisallowContinuationTokenMessage, + additionalHeaders: page.AdditionalHeaders, + state: InitializingQueryState, + streaming: page.Streaming)); + } + } + + IQueryPipelineStage pipelineStage; + if (nonStreaming) + { + Queue orderbyEnumerators = new Queue(); + foreach ((OrderByQueryPartitionRangePageAsyncEnumerator enumerator, OrderByContinuationToken _) in enumeratorsAndTokens) + { + OrderByQueryPartitionRangePageAsyncEnumerator bufferedEnumerator = enumerator.CloneAsFullyBufferedEnumerator(); + orderbyEnumerators.Enqueue(bufferedEnumerator); + } + + foreach (OrderByQueryPartitionRangePageAsyncEnumerator initializedEnumerator in initializedEnumerators) + { + OrderByQueryPartitionRangePageAsyncEnumerator bufferedEnumerator = initializedEnumerator.CloneAsFullyBufferedEnumerator(); + orderbyEnumerators.Enqueue(bufferedEnumerator); + } + + await ParallelPrefetch.PrefetchInParallelAsync(orderbyEnumerators, init.MaxConcurrency, trace, cancellationToken); + + pipelineStage = await NonStreamingOrderByPipelineStage.CreateAsync( + init.QueryPaginationOptions, + sortOrders, + orderbyEnumerators, + queryPageParameters, + trace, + cancellationToken); + } + else + { + pipelineStage = StreamingOrderByCrossPartitionQueryPipelineStage.Create( + init.DocumentContainer, + sortOrders, + initializedEnumerators, + enumeratorsAndTokens, + init.QueryPaginationOptions, + init.MaxConcurrency); + } + + return (TryCatch.FromResult(pipelineStage), bufferedPages); + } + + private static async ValueTask MoveNextAsync_InitializeAsync_HandleSplitAsync( + IDocumentContainer documentContainer, + Queue<(OrderByQueryPartitionRangePageAsyncEnumerator enumerator, OrderByContinuationToken token)> uninitializedEnumeratorsAndTokens, + OrderByQueryPartitionRangePageAsyncEnumerator uninitializedEnumerator, + OrderByContinuationToken token, + ITrace trace, + CancellationToken cancellationToken) + { + cancellationToken.ThrowIfCancellationRequested(); + + IReadOnlyList childRanges = await documentContainer.GetChildRangeAsync( + 
uninitializedEnumerator.FeedRangeState.FeedRange, + trace, + cancellationToken); + + if (childRanges.Count <= 1) + { + // We optimistically assumed that the cache is not stale. + // In the event that it is (where we only get back one child / the partition that we think got split) + // Then we need to refresh the cache + await documentContainer.RefreshProviderAsync(trace, cancellationToken); + childRanges = await documentContainer.GetChildRangeAsync( + uninitializedEnumerator.FeedRangeState.FeedRange, + trace, + cancellationToken); + } + + if (childRanges.Count < 1) + { + string errorMessage = "SDK invariant violated 82086B2D: Must have at least one EPK range in a cross partition enumerator"; + throw Resource.CosmosExceptions.CosmosExceptionFactory.CreateInternalServerErrorException( + message: errorMessage, + headers: null, + stackTrace: null, + trace: trace, + error: new Microsoft.Azure.Documents.Error { Code = "SDK_invariant_violated_82086B2D", Message = errorMessage }); + } + + if (childRanges.Count == 1) + { + // On a merge, the 410/1002 results in a single parent + // We maintain the current enumerator's range and let the RequestInvokerHandler logic kick in + OrderByQueryPartitionRangePageAsyncEnumerator childPaginator = OrderByQueryPartitionRangePageAsyncEnumerator.Create( + documentContainer, + uninitializedEnumerator.SqlQuerySpec, + new FeedRangeState(uninitializedEnumerator.FeedRangeState.FeedRange, uninitializedEnumerator.StartOfPageState), + partitionKey: null, + uninitializedEnumerator.QueryPaginationOptions, + uninitializedEnumerator.Filter, + PrefetchPolicy.PrefetchSinglePage); + uninitializedEnumeratorsAndTokens.Enqueue((childPaginator, token)); + } + else + { + // Split + foreach (FeedRangeInternal childRange in childRanges) + { + cancellationToken.ThrowIfCancellationRequested(); + + OrderByQueryPartitionRangePageAsyncEnumerator childPaginator = OrderByQueryPartitionRangePageAsyncEnumerator.Create( + documentContainer, + uninitializedEnumerator.SqlQuerySpec, + new FeedRangeState(childRange, uninitializedEnumerator.StartOfPageState), + partitionKey: null, + uninitializedEnumerator.QueryPaginationOptions, + uninitializedEnumerator.Filter, + PrefetchPolicy.PrefetchSinglePage); + uninitializedEnumeratorsAndTokens.Enqueue((childPaginator, token)); + } + } + } + + private static bool IsSplitException(Exception exception) + { + while (exception.InnerException != null) + { + exception = exception.InnerException; + } + + return exception.IsPartitionSplitException(); + } + + /// + /// This class is responsible for draining cross partition queries that have order by conditions. + /// The way order by queries work is that they are doing a k-way merge of sorted lists from each partition with an added condition. + /// The added condition is that if 2 or more top documents from different partitions are equivalent then we drain from the left most partition first. + /// This way we can generate a single continuation token for all n partitions. + /// This class is able to stop and resume execution by generating continuation tokens and reconstructing an execution context from said token. 
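+ /// For example, if partitions P0 and P1 both surface the same top value, P0 (the left-most) is drained first and P1's copy is only drained once P0 has moved past it; that deterministic order is what makes a single continuation token sufficient.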
+ /// + private sealed class StreamingOrderByCrossPartitionQueryPipelineStage : IQueryPipelineStage + { + private readonly IDocumentContainer documentContainer; + private readonly IReadOnlyList sortOrders; + private readonly PriorityQueue enumerators; + private readonly Queue<(OrderByQueryPartitionRangePageAsyncEnumerator enumerator, OrderByContinuationToken token)> uninitializedEnumeratorsAndTokens; + private readonly QueryPaginationOptions queryPaginationOptions; + private readonly int maxConcurrency; + + private QueryState state; + private bool returnedFinalPage; + + private static class Expressions + { + public const string LessThan = "<"; + public const string LessThanOrEqualTo = "<="; + public const string EqualTo = "="; + public const string GreaterThan = ">"; + public const string GreaterThanOrEqualTo = ">="; + public const string True = "true"; + public const string False = "false"; + } + + private StreamingOrderByCrossPartitionQueryPipelineStage( + IDocumentContainer documentContainer, + IReadOnlyList sortOrders, + QueryPaginationOptions queryPaginationOptions, + int maxConcurrency, + IEnumerable<(OrderByQueryPartitionRangePageAsyncEnumerator, OrderByContinuationToken)> uninitializedEnumeratorsAndTokens, + QueryState state) + { + this.documentContainer = documentContainer ?? throw new ArgumentNullException(nameof(documentContainer)); + this.sortOrders = sortOrders ?? throw new ArgumentNullException(nameof(sortOrders)); + this.enumerators = new PriorityQueue(new OrderByEnumeratorComparer(this.sortOrders)); + this.queryPaginationOptions = queryPaginationOptions ?? QueryPaginationOptions.Default; + this.maxConcurrency = maxConcurrency < 0 ? throw new ArgumentOutOfRangeException($"{nameof(maxConcurrency)} must be a non negative number.") : maxConcurrency; + this.uninitializedEnumeratorsAndTokens = new Queue<(OrderByQueryPartitionRangePageAsyncEnumerator, OrderByContinuationToken)>(uninitializedEnumeratorsAndTokens ?? throw new ArgumentNullException(nameof(uninitializedEnumeratorsAndTokens))); + this.state = state ?? InitializingQueryState; + } + + private StreamingOrderByCrossPartitionQueryPipelineStage( + IDocumentContainer documentContainer, + IReadOnlyList sortOrders, + PriorityQueue enumerators, + Queue<(OrderByQueryPartitionRangePageAsyncEnumerator enumerator, OrderByContinuationToken token)> uninitializedEnumeratorsAndTokens, + QueryPaginationOptions queryPaginationOptions, + int maxConcurrency) + { + this.documentContainer = documentContainer ?? throw new ArgumentNullException(nameof(documentContainer)); + this.sortOrders = sortOrders ?? throw new ArgumentNullException(nameof(sortOrders)); + this.enumerators = enumerators ?? throw new ArgumentNullException(nameof(enumerators)); + this.uninitializedEnumeratorsAndTokens = uninitializedEnumeratorsAndTokens ?? throw new ArgumentNullException(nameof(uninitializedEnumeratorsAndTokens)); + this.queryPaginationOptions = queryPaginationOptions ?? 
throw new ArgumentNullException(nameof(queryPaginationOptions)); + this.maxConcurrency = maxConcurrency; + this.state = InitializingQueryState; + } + + public TryCatch Current { get; private set; } + + public ValueTask DisposeAsync() + { + return default; + } + + private async ValueTask MoveNextAsync_Initialize_FromBeginningAsync( + OrderByQueryPartitionRangePageAsyncEnumerator uninitializedEnumerator, + ITrace trace, + CancellationToken cancellationToken) + { + cancellationToken.ThrowIfCancellationRequested(); + + if (uninitializedEnumerator == null) + { + throw new ArgumentNullException(nameof(uninitializedEnumerator)); + } + + // We need to prime the page + if (!await uninitializedEnumerator.MoveNextAsync(trace, cancellationToken)) + { + // No more documents, so just return an empty page + this.Current = TryCatch.FromResult( + new QueryPage( + documents: EmptyPage, + requestCharge: 0, + activityId: string.Empty, + cosmosQueryExecutionInfo: default, + distributionPlanSpec: default, + disallowContinuationTokenMessage: default, + additionalHeaders: default, + state: this.state, + streaming: true)); + return true; + } + + if (uninitializedEnumerator.Current.Failed) + { + if (IsSplitException(uninitializedEnumerator.Current.Exception)) + { + return await this.MoveNextAsync_InitializeAsync_HandleSplitAsync(uninitializedEnumerator, token: null, trace, cancellationToken); + } + + this.uninitializedEnumeratorsAndTokens.Enqueue((uninitializedEnumerator, token: null)); + this.Current = TryCatch.FromException(uninitializedEnumerator.Current.Exception); + } + else + { + QueryPage page = uninitializedEnumerator.Current.Result.Page; + + if (!uninitializedEnumerator.Current.Result.Enumerator.MoveNext()) + { + // Page was empty + if (uninitializedEnumerator.FeedRangeState.State != null) + { + this.uninitializedEnumeratorsAndTokens.Enqueue((uninitializedEnumerator, token: null)); + } + + if ((this.uninitializedEnumeratorsAndTokens.Count == 0) && (this.enumerators.Count == 0)) + { + // Query did not match any results. We need to emit a fake empty page with null continuation + this.Current = TryCatch.FromResult( + new QueryPage( + documents: EmptyPage, + requestCharge: page.RequestCharge, + activityId: string.IsNullOrEmpty(page.ActivityId) ? 
Guid.NewGuid().ToString() : page.ActivityId, + cosmosQueryExecutionInfo: page.CosmosQueryExecutionInfo, + distributionPlanSpec: default, + disallowContinuationTokenMessage: page.DisallowContinuationTokenMessage, + additionalHeaders: page.AdditionalHeaders, + state: null, + streaming: page.Streaming)); + this.returnedFinalPage = true; + return true; + } + } + else + { + this.enumerators.Enqueue(uninitializedEnumerator); + } + + // Just return an empty page with the stats + this.Current = TryCatch.FromResult( + new QueryPage( + documents: EmptyPage, + requestCharge: page.RequestCharge, + activityId: page.ActivityId, + cosmosQueryExecutionInfo: page.CosmosQueryExecutionInfo, + distributionPlanSpec: default, + disallowContinuationTokenMessage: page.DisallowContinuationTokenMessage, + additionalHeaders: page.AdditionalHeaders, + state: this.state, + page.Streaming)); + } + + return true; + } + + private async ValueTask MoveNextAsync_Initialize_FilterAsync( + OrderByQueryPartitionRangePageAsyncEnumerator uninitializedEnumerator, + OrderByContinuationToken token, + ITrace trace, + CancellationToken cancellationToken) + { + cancellationToken.ThrowIfCancellationRequested(); + + if (uninitializedEnumerator == null) + { + throw new ArgumentNullException(nameof(uninitializedEnumerator)); + } + + if (token == null) + { + throw new ArgumentNullException(nameof(token)); + } + + TryCatch<(bool, int, TryCatch)> filterMonad = await FilterNextAsync( + uninitializedEnumerator, + this.sortOrders, + token, + trace, + cancellationToken: default); + + if (filterMonad.Failed) + { + if (IsSplitException(filterMonad.Exception)) + { + return await this.MoveNextAsync_InitializeAsync_HandleSplitAsync(uninitializedEnumerator, token, trace, cancellationToken); + } + + this.Current = TryCatch.FromException(filterMonad.Exception); + return true; + } + + (bool doneFiltering, int itemsLeftToSkip, TryCatch monadicQueryByPage) = filterMonad.Result; + QueryPage page = uninitializedEnumerator.Current.Result.Page; + if (doneFiltering) + { + if (uninitializedEnumerator.Current.Result.Enumerator.Current != null) + { + this.enumerators.Enqueue(uninitializedEnumerator); + } + else if ((this.uninitializedEnumeratorsAndTokens.Count == 0) && (this.enumerators.Count == 0)) + { + // Query did not match any results. + // We need to emit a fake empty page with null continuation + this.Current = TryCatch.FromResult( + new QueryPage( + documents: EmptyPage, + requestCharge: page.RequestCharge, + activityId: string.IsNullOrEmpty(page.ActivityId) ? 
Guid.NewGuid().ToString() : page.ActivityId, + cosmosQueryExecutionInfo: page.CosmosQueryExecutionInfo, + distributionPlanSpec: default, + disallowContinuationTokenMessage: page.DisallowContinuationTokenMessage, + additionalHeaders: page.AdditionalHeaders, + state: null, + streaming: page.Streaming)); + this.returnedFinalPage = true; + return true; + } + } + else + { + if (monadicQueryByPage.Failed) + { + if (IsSplitException(filterMonad.Exception)) + { + return await this.MoveNextAsync_InitializeAsync_HandleSplitAsync(uninitializedEnumerator, token, trace, cancellationToken); + } + } + + if (uninitializedEnumerator.FeedRangeState.State != default) + { + // We need to update the token + OrderByContinuationToken modifiedToken = new OrderByContinuationToken( + new ParallelContinuationToken( + ((CosmosString)uninitializedEnumerator.FeedRangeState.State.Value).Value, + ((FeedRangeEpk)uninitializedEnumerator.FeedRangeState.FeedRange).Range), + token.OrderByItems, + token.ResumeValues, + token.Rid, + itemsLeftToSkip, + token.Filter); + this.uninitializedEnumeratorsAndTokens.Enqueue((uninitializedEnumerator, modifiedToken)); + CosmosElement cosmosElementOrderByContinuationToken = OrderByContinuationToken.ToCosmosElement(modifiedToken); + CosmosArray continuationTokenList = CosmosArray.Create(new List() { cosmosElementOrderByContinuationToken }); + this.state = new QueryState(continuationTokenList); + } + } + + // Just return an empty page with the stats + this.Current = TryCatch.FromResult( + new QueryPage( + documents: EmptyPage, + requestCharge: page.RequestCharge, + activityId: page.ActivityId, + cosmosQueryExecutionInfo: page.CosmosQueryExecutionInfo, + distributionPlanSpec: default, + disallowContinuationTokenMessage: page.DisallowContinuationTokenMessage, + additionalHeaders: page.AdditionalHeaders, + state: InitializingQueryState, + streaming: page.Streaming)); + + return true; + } + + private async ValueTask MoveNextAsync_InitializeAsync_HandleSplitAsync( + OrderByQueryPartitionRangePageAsyncEnumerator uninitializedEnumerator, + OrderByContinuationToken token, + ITrace trace, + CancellationToken cancellationToken) + { + await OrderByCrossPartitionQueryPipelineStage.MoveNextAsync_InitializeAsync_HandleSplitAsync( + this.documentContainer, + this.uninitializedEnumeratorsAndTokens, + uninitializedEnumerator, + token, + trace, + cancellationToken); + + // Recursively retry + return await this.MoveNextAsync(trace, cancellationToken); + } + + private async ValueTask MoveNextAsync_InitializeAsync(ITrace trace, CancellationToken cancellationToken) + { + cancellationToken.ThrowIfCancellationRequested(); + + await ParallelPrefetch.PrefetchInParallelAsync( + this.uninitializedEnumeratorsAndTokens.Select(value => value.enumerator), + this.maxConcurrency, + trace, + cancellationToken); + (OrderByQueryPartitionRangePageAsyncEnumerator uninitializedEnumerator, OrderByContinuationToken token) = this.uninitializedEnumeratorsAndTokens.Dequeue(); + bool movedNext = token is null + ? 
await this.MoveNextAsync_Initialize_FromBeginningAsync(uninitializedEnumerator, trace, cancellationToken) + : await this.MoveNextAsync_Initialize_FilterAsync(uninitializedEnumerator, token, trace, cancellationToken); + return movedNext; + } + + private ValueTask MoveNextAsync_DrainPageAsync(ITrace trace, CancellationToken cancellationToken) + { + cancellationToken.ThrowIfCancellationRequested(); + + if (trace == null) + { + throw new ArgumentNullException(nameof(trace)); + } + + OrderByQueryPartitionRangePageAsyncEnumerator currentEnumerator = default; + OrderByQueryResult orderByQueryResult = default; + + // Try to form a page with as many items in the sorted order without having to do async work. + List results = new List(); + while (results.Count < this.queryPaginationOptions.PageSizeLimit.GetValueOrDefault(int.MaxValue)) + { + currentEnumerator = this.enumerators.Dequeue(); + orderByQueryResult = new OrderByQueryResult(currentEnumerator.Current.Result.Enumerator.Current); + results.Add(orderByQueryResult); + + if (!currentEnumerator.Current.Result.Enumerator.MoveNext()) + { + // The order by page ran out of results + if (currentEnumerator.FeedRangeState.State != null) + { + // If the continuation isn't null + // then mark the enumerator as unitialized and it will get requeueed on the next iteration with a fresh page. + this.uninitializedEnumeratorsAndTokens.Enqueue((currentEnumerator, (OrderByContinuationToken)null)); + + // Use the token for the next page, since we fully drained the enumerator. + OrderByContinuationToken orderByContinuationToken = CreateOrderByContinuationToken( + new ParallelContinuationToken( + token: ((CosmosString)currentEnumerator.FeedRangeState.State.Value).Value, + range: ((FeedRangeEpk)currentEnumerator.FeedRangeState.FeedRange).Range), + orderByQueryResult, + skipCount: 0, + filter: currentEnumerator.Filter); + + CosmosElement cosmosElementOrderByContinuationToken = OrderByContinuationToken.ToCosmosElement(orderByContinuationToken); + CosmosArray continuationTokenList = CosmosArray.Create(new List() { cosmosElementOrderByContinuationToken }); + + this.state = new QueryState(continuationTokenList); + + // Return a page of results + // No stats to report, since we already reported it when we moved to this page. + this.Current = TryCatch.FromResult( + new QueryPage( + documents: results.Select(result => result.Payload).ToList(), + requestCharge: 0, + activityId: default, + cosmosQueryExecutionInfo: default, + distributionPlanSpec: default, + disallowContinuationTokenMessage: default, + additionalHeaders: currentEnumerator.Current.Result.Page.AdditionalHeaders, + state: this.state, + streaming: true)); + return new ValueTask(true); + } + + // Todo: we can optimize this by having a special "Done" continuation token + // so we don't grab a full page and filter it through + // but this would break older clients, so wait for a compute only fork. + + break; + } + + this.enumerators.Enqueue(currentEnumerator); + } + + // It is possible that we emit multiple documents with the same rid due to JOIN queries. + // This means it is not enough to serialize the rid that we left on to resume the query. + // We need to also serialize the number of documents with that rid, so we can skip it when resuming + int skipCount = results.Where(result => string.Equals(result.Rid, orderByQueryResult.Rid)).Count(); + + // Create the continuation token. 
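+ // The serialized state is a single-element array wrapping the composite token for the partition we stopped in: the backend continuation and EPK range, the order-by values (or resume values) and _rid of the last item emitted, the skip count for duplicate rids, and the filter used for this partition.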
+ CosmosElement state; + if ((this.enumerators.Count == 0) && (this.uninitializedEnumeratorsAndTokens.Count == 0)) + { + state = null; + } + else + { + OrderByContinuationToken orderByContinuationToken = CreateOrderByContinuationToken( + new ParallelContinuationToken( + token: currentEnumerator.StartOfPageState != null ? ((CosmosString)currentEnumerator.StartOfPageState.Value).Value : null, + range: ((FeedRangeEpk)currentEnumerator.FeedRangeState.FeedRange).Range), + orderByQueryResult, + skipCount, + currentEnumerator.Filter); + + CosmosElement cosmosElementOrderByContinuationToken = OrderByContinuationToken.ToCosmosElement(orderByContinuationToken); + CosmosArray continuationTokenList = CosmosArray.Create(new List() { cosmosElementOrderByContinuationToken }); + + state = continuationTokenList; + } + + this.state = state != null ? new QueryState(state) : null; + + // Return a page of results + // No stats to report, since we already reported it when we moved to this page. + this.Current = TryCatch.FromResult( + new QueryPage( + documents: results.Select(result => result.Payload).ToList(), + requestCharge: 0, + activityId: default, + cosmosQueryExecutionInfo: default, + distributionPlanSpec: default, + disallowContinuationTokenMessage: default, + additionalHeaders: currentEnumerator?.Current.Result.Page.AdditionalHeaders, + state: this.state, + streaming: true)); + + if (state == null) + { + this.returnedFinalPage = true; + } + + return new ValueTask(true); + } + + //// In order to maintain the continuation token for the user we must drain with a few constraints + //// 1) We always drain from the partition, which has the highest priority item first + //// 2) If multiple partitions have the same priority item then we drain from the left most first + //// otherwise we would need to keep track of how many of each item we drained from each partition + //// (just like parallel queries). + //// Visually that look the following case where we have three partitions that are numbered and store letters. + //// For teaching purposes I have made each item a tuple of the following form: + //// + //// So that duplicates across partitions are distinct, but duplicates within partitions are indistinguishable. + //// |-------| |-------| |-------| + //// | | | | | | + //// | | | | | | + //// | | | | | | + //// | | | | | | + //// | | | | | | + //// | | | | | | + //// | | | | | | + //// |-------| |-------| |-------| + //// Now the correct drain order in this case is: + //// ,,,,,,,,,,, + //// ,,,,,,,,, + //// In more mathematical terms + //// 1) always comes before where x < z + //// 2) always come before where j < k + public ValueTask MoveNextAsync(ITrace trace, CancellationToken cancellationToken) + { + cancellationToken.ThrowIfCancellationRequested(); + + if (trace == null) + { + throw new ArgumentNullException(nameof(trace)); + } + + if (this.uninitializedEnumeratorsAndTokens.Count != 0) + { + return this.MoveNextAsync_InitializeAsync(trace, cancellationToken); + } + + if (this.enumerators.Count == 0) + { + if (!this.returnedFinalPage) + { + // return a empty page with null continuation token + this.Current = TryCatch.FromResult( + new QueryPage( + documents: EmptyPage, + requestCharge: 0, + activityId: Guid.NewGuid().ToString(), + cosmosQueryExecutionInfo: default, + distributionPlanSpec: default, + disallowContinuationTokenMessage: default, + additionalHeaders: default, + state: default, + streaming: true)); + this.returnedFinalPage = true; + return new ValueTask(true); + } + + // Finished draining. 
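+ // The empty page with a null continuation has already been surfaced (returnedFinalPage), so there is nothing left to emit.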
+ return new ValueTask(false); + } + + return this.MoveNextAsync_DrainPageAsync(trace, cancellationToken); + } + + public static IQueryPipelineStage Create( + IDocumentContainer documentContainer, + IReadOnlyList sortOrders, + PriorityQueue enumerators, + Queue<(OrderByQueryPartitionRangePageAsyncEnumerator enumerator, OrderByContinuationToken token)> uninitializedEnumeratorsAndTokens, + QueryPaginationOptions queryPaginationOptions, + int maxConcurrency) + { + return new StreamingOrderByCrossPartitionQueryPipelineStage( + documentContainer, + sortOrders, + enumerators, + uninitializedEnumeratorsAndTokens, + queryPaginationOptions, + maxConcurrency); + } + + public static TryCatch MonadicCreate( + IDocumentContainer documentContainer, + SqlQuerySpec sqlQuerySpec, + IReadOnlyList targetRanges, + Cosmos.PartitionKey? partitionKey, + IReadOnlyList orderByColumns, + QueryPaginationOptions queryPaginationOptions, + int maxConcurrency, + CosmosElement continuationToken) + { + // TODO (brchon): For now we are not honoring non deterministic ORDER BY queries, since there is a bug in the continuation logic. + // We can turn it back on once the bug is fixed. + // This shouldn't hurt any query results. + + List<(OrderByQueryPartitionRangePageAsyncEnumerator, OrderByContinuationToken)> enumeratorsAndTokens; + if (continuationToken == null) + { + // Start off all the partition key ranges with null continuation + SqlQuerySpec rewrittenQueryForOrderBy = new SqlQuerySpec( + sqlQuerySpec.QueryText.Replace(oldValue: FormatPlaceHolder, newValue: TrueFilter), + sqlQuerySpec.Parameters); + + enumeratorsAndTokens = targetRanges + .Select(range => (OrderByQueryPartitionRangePageAsyncEnumerator.Create( + documentContainer, + rewrittenQueryForOrderBy, + new FeedRangeState(range, state: default), + partitionKey, + queryPaginationOptions, + TrueFilter, + PrefetchPolicy.PrefetchSinglePage), + (OrderByContinuationToken)null)) + .ToList(); + } + else + { + TryCatch> monadicGetOrderByContinuationTokenMapping = MonadicGetOrderByContinuationTokenMapping( + targetRanges, + continuationToken, + orderByColumns.Count); + if (monadicGetOrderByContinuationTokenMapping.Failed) + { + return TryCatch.FromException(monadicGetOrderByContinuationTokenMapping.Exception); + } + + PartitionMapper.PartitionMapping partitionMapping = monadicGetOrderByContinuationTokenMapping.Result; + + OrderByContinuationToken targetContinuationToken = partitionMapping.TargetMapping.Values.First(); + + int orderByResumeValueCount = 0; + IReadOnlyList resumeValues; + IReadOnlyList orderByItems; + if (targetContinuationToken.ResumeValues != null) + { + // Use SqlQueryResumeValue for continuation if it is present. + resumeValues = targetContinuationToken.ResumeValues; + orderByItems = null; + orderByResumeValueCount = resumeValues.Count; + } + else + { + // If continuation token has only OrderByItems, check if it can be converted to SqlQueryResumeValue. This will + // help avoid re-writing the query. Conversion will work as long as the order by item type is a supported type. 
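+ // Roughly: strings, booleans, nulls, undefined, arrays, objects and the supported number types convert cleanly, while extended types such as CosmosBinary and CosmosGuid do not (see SupportedResumeTypeVisitor) and force the filter re-write path below.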
+ orderByResumeValueCount = targetContinuationToken.OrderByItems.Count; + + if (ContainsSupportedResumeTypes(targetContinuationToken.OrderByItems)) + { + // Convert the order by items to SqlQueryResumeValue + List generatedResumeValues = new List(targetContinuationToken.OrderByItems.Count); + //foreach (CosmosElement orderByItem in orderByItems) + foreach (OrderByItem orderByItem in targetContinuationToken.OrderByItems) + { + generatedResumeValues.Add(SqlQueryResumeValue.FromOrderByValue(orderByItem.Item)); + } + + resumeValues = generatedResumeValues; + orderByItems = null; + } + else + { + orderByItems = targetContinuationToken.OrderByItems.Select(x => x.Item).ToList(); + resumeValues = null; + } + } + + if (orderByResumeValueCount != orderByColumns.Count) + { + return TryCatch.FromException( + new MalformedContinuationTokenException( + $"Order By Items from continuation token did not match the query text. " + + $"Order by item count: {orderByResumeValueCount} did not match column count {orderByColumns.Count()}. " + + $"Continuation token: {targetContinuationToken}")); + } + + enumeratorsAndTokens = new List<(OrderByQueryPartitionRangePageAsyncEnumerator, OrderByContinuationToken)>(); + if (resumeValues != null) + { + // Continuation contains resume values, so update SqlQuerySpec to include SqlQueryResumeFilter which + // will specify the resume point to the backend. This avoid having to re-write the query. + + // Process partitions left of Target. The resume values in these partition have + // already been processed so exclude flag is set to true. + SqlQuerySpec leftQuerySpec = new SqlQuerySpec( + sqlQuerySpec.QueryText.Replace(oldValue: FormatPlaceHolder, newValue: TrueFilter), + sqlQuerySpec.Parameters, + new SqlQueryResumeFilter(resumeValues, null, true)); + + foreach (KeyValuePair kvp in partitionMapping.MappingLeftOfTarget) + { + FeedRangeEpk range = kvp.Key; + OrderByContinuationToken token = kvp.Value; + OrderByQueryPartitionRangePageAsyncEnumerator remoteEnumerator = OrderByQueryPartitionRangePageAsyncEnumerator.Create( + documentContainer, + leftQuerySpec, + new FeedRangeState(range, token?.ParallelContinuationToken?.Token != null ? new QueryState(CosmosString.Create(token.ParallelContinuationToken.Token)) : null), + partitionKey, + queryPaginationOptions, + filter: null, + PrefetchPolicy.PrefetchSinglePage); + + enumeratorsAndTokens.Add((remoteEnumerator, token)); + } + + // Process Target Partitions which is the last partition from which data has been returned. + // For this partition the Rid value needs to be set if present. Exclude flag is not set as the document + // matching the Rid will be skipped in SDK based on SkipCount value. + // Backend requests can contains both SqlQueryResumeFilter and ContinuationToken and the backend will pick + // the resume point that is bigger i.e. most restrictive + foreach (KeyValuePair kvp in partitionMapping.TargetMapping) + { + FeedRangeEpk range = kvp.Key; + OrderByContinuationToken token = kvp.Value; + + SqlQuerySpec targetQuerySpec = new SqlQuerySpec( + sqlQuerySpec.QueryText.Replace(oldValue: FormatPlaceHolder, newValue: TrueFilter), + sqlQuerySpec.Parameters, + new SqlQueryResumeFilter(resumeValues, token?.Rid, false)); + + OrderByQueryPartitionRangePageAsyncEnumerator remoteEnumerator = OrderByQueryPartitionRangePageAsyncEnumerator.Create( + documentContainer, + targetQuerySpec, + new FeedRangeState(range, token?.ParallelContinuationToken?.Token != null ? 
new QueryState(CosmosString.Create(token.ParallelContinuationToken.Token)) : null), + partitionKey, + queryPaginationOptions, + filter: null, + PrefetchPolicy.PrefetchSinglePage); + + enumeratorsAndTokens.Add((remoteEnumerator, token)); + } + + // Process partitions right of target. The Resume value in these partitions have not been processed so the exclude value is set to false. + SqlQuerySpec rightQuerySpec = new SqlQuerySpec( + sqlQuerySpec.QueryText.Replace(oldValue: FormatPlaceHolder, newValue: TrueFilter), + sqlQuerySpec.Parameters, + new SqlQueryResumeFilter(resumeValues, null, false)); + + foreach (KeyValuePair kvp in partitionMapping.MappingRightOfTarget) + { + FeedRangeEpk range = kvp.Key; + OrderByContinuationToken token = kvp.Value; + OrderByQueryPartitionRangePageAsyncEnumerator remoteEnumerator = OrderByQueryPartitionRangePageAsyncEnumerator.Create( + documentContainer, + rightQuerySpec, + new FeedRangeState(range, token?.ParallelContinuationToken?.Token != null ? new QueryState(CosmosString.Create(token.ParallelContinuationToken.Token)) : null), + partitionKey, + queryPaginationOptions, + filter: null, + PrefetchPolicy.PrefetchSinglePage); + + enumeratorsAndTokens.Add((remoteEnumerator, token)); + } + } + else + { + // If continuation token doesn't have resume values or if order by items cannot be converted to resume values then + // rewrite the query filter to get the correct resume point + ReadOnlyMemory<(OrderByColumn, CosmosElement)> columnAndItems = orderByColumns.Zip(orderByItems, (column, item) => (column, item)).ToArray(); + + // For ascending order-by, left of target partition has filter expression > value, + // right of target partition has filter expression >= value, + // and target partition takes the previous filter from continuation (or true if no continuation) + (string leftFilter, string targetFilter, string rightFilter) = GetFormattedFilters(columnAndItems); + List<(IReadOnlyDictionary, string)> tokenMappingAndFilters = new List<(IReadOnlyDictionary, string)>() + { + { (partitionMapping.MappingLeftOfTarget, leftFilter) }, + { (partitionMapping.TargetMapping, targetFilter) }, + { (partitionMapping.MappingRightOfTarget, rightFilter) }, + }; + + foreach ((IReadOnlyDictionary tokenMapping, string filter) in tokenMappingAndFilters) + { + SqlQuerySpec rewrittenQueryForOrderBy = new SqlQuerySpec( + sqlQuerySpec.QueryText.Replace(oldValue: FormatPlaceHolder, newValue: filter), + sqlQuerySpec.Parameters); + + foreach (KeyValuePair kvp in tokenMapping) + { + FeedRangeEpk range = kvp.Key; + OrderByContinuationToken token = kvp.Value; + OrderByQueryPartitionRangePageAsyncEnumerator remoteEnumerator = OrderByQueryPartitionRangePageAsyncEnumerator.Create( + documentContainer, + rewrittenQueryForOrderBy, + new FeedRangeState(range, token?.ParallelContinuationToken?.Token != null ? new QueryState(CosmosString.Create(token.ParallelContinuationToken.Token)) : null), + partitionKey, + queryPaginationOptions, + filter, + PrefetchPolicy.PrefetchSinglePage); + + enumeratorsAndTokens.Add((remoteEnumerator, token)); + } + } + } + } + + StreamingOrderByCrossPartitionQueryPipelineStage stage = new StreamingOrderByCrossPartitionQueryPipelineStage( + documentContainer, + orderByColumns.Select(column => column.SortOrder).ToList(), + queryPaginationOptions, + maxConcurrency, + enumeratorsAndTokens, + continuationToken == null ? 
null : new QueryState(continuationToken)); + return TryCatch.FromResult(stage); + } + + private static TryCatch> MonadicGetOrderByContinuationTokenMapping( + IReadOnlyList partitionKeyRanges, + CosmosElement continuationToken, + int numOrderByItems) + { + if (partitionKeyRanges == null) + { + throw new ArgumentOutOfRangeException(nameof(partitionKeyRanges)); + } + + if (numOrderByItems < 0) + { + throw new ArgumentOutOfRangeException(nameof(numOrderByItems)); + } + + if (continuationToken == null) + { + throw new ArgumentNullException(nameof(continuationToken)); + } + + TryCatch> monadicExtractContinuationTokens = MonadicExtractOrderByTokens(continuationToken, numOrderByItems); + if (monadicExtractContinuationTokens.Failed) + { + return TryCatch>.FromException(monadicExtractContinuationTokens.Exception); + } + + return PartitionMapper.MonadicGetPartitionMapping( + partitionKeyRanges, + monadicExtractContinuationTokens.Result); + } + + private static TryCatch> MonadicExtractOrderByTokens( + CosmosElement continuationToken, + int numOrderByColumns) + { + if (continuationToken == null) + { + return TryCatch>.FromResult(default); + } + + if (!(continuationToken is CosmosArray cosmosArray)) + { + return TryCatch>.FromException( + new MalformedContinuationTokenException( + $"Order by continuation token must be an array: {continuationToken}.")); + } + + if (cosmosArray.Count == 0) + { + return TryCatch>.FromException( + new MalformedContinuationTokenException( + $"Order by continuation token cannot be empty: {continuationToken}.")); + } + + List orderByContinuationTokens = new List(); + foreach (CosmosElement arrayItem in cosmosArray) + { + TryCatch tryCreateOrderByContinuationToken = OrderByContinuationToken.TryCreateFromCosmosElement(arrayItem); + if (!tryCreateOrderByContinuationToken.Succeeded) + { + return TryCatch>.FromException(tryCreateOrderByContinuationToken.Exception); + } + + orderByContinuationTokens.Add(tryCreateOrderByContinuationToken.Result); + } + + foreach (OrderByContinuationToken suppliedOrderByContinuationToken in orderByContinuationTokens) + { + int orderByCount = GetOrderByItemCount(suppliedOrderByContinuationToken); + if (orderByCount != numOrderByColumns) + { + return TryCatch>.FromException( + new MalformedContinuationTokenException( + $"Invalid order-by items in continuation token {continuationToken} for OrderBy~Context.")); + } + } + + return TryCatch>.FromResult(orderByContinuationTokens); + } + + private static int GetOrderByItemCount(OrderByContinuationToken orderByContinuationToken) + { + return orderByContinuationToken.ResumeValues != null ? + orderByContinuationToken.ResumeValues.Count : orderByContinuationToken.OrderByItems.Count; + } + + private static void AppendToBuilders((StringBuilder leftFilter, StringBuilder targetFilter, StringBuilder rightFilter) builders, object str) + { + AppendToBuilders(builders, str, str, str); + } + + private static void AppendToBuilders((StringBuilder leftFilter, StringBuilder targetFilter, StringBuilder rightFilter) builders, object left, object target, object right) + { + builders.leftFilter.Append(left); + builders.targetFilter.Append(target); + builders.rightFilter.Append(right); + } + + private static (string leftFilter, string targetFilter, string rightFilter) GetFormattedFilters( + ReadOnlyMemory<(OrderByColumn orderByColumn, CosmosElement orderByItem)> columnAndItems) + { + // When we run cross partition queries, + // we only serialize the continuation token for the partition that we left off on. 
+ // The only problem is that when we resume the order by query, + // we don't have continuation tokens for all other partition. + // The saving grace is that the data has a composite sort order(query sort order, partition key range id) + // so we can generate range filters which in turn the backend will turn into rid based continuation tokens, + // which is enough to get the streams of data flowing from all partitions. + // The details of how this is done is described below: + int numOrderByItems = columnAndItems.Length; + bool isSingleOrderBy = numOrderByItems == 1; + StringBuilder left = new StringBuilder(); + StringBuilder target = new StringBuilder(); + StringBuilder right = new StringBuilder(); + + (StringBuilder, StringBuilder, StringBuilder) builders = (left, target, right); + + if (isSingleOrderBy) + { + //For a single order by query we resume the continuations in this manner + // Suppose the query is SELECT* FROM c ORDER BY c.string ASC + // And we left off on partition N with the value "B" + // Then + // All the partitions to the left will have finished reading "B" + // Partition N is still reading "B" + // All the partitions to the right have let to read a "B + // Therefore the filters should be + // > "B" , >= "B", and >= "B" respectively + // Repeat the same logic for DESC and you will get + // < "B", <= "B", and <= "B" respectively + // The general rule becomes + // For ASC + // > for partitions to the left + // >= for the partition we left off on + // >= for the partitions to the right + // For DESC + // < for partitions to the left + // <= for the partition we left off on + // <= for the partitions to the right + (OrderByColumn orderByColumn, CosmosElement orderByItem) = columnAndItems.Span[0]; + (string expression, SortOrder sortOrder) = (orderByColumn.Expression, orderByColumn.SortOrder); + + AppendToBuilders(builders, "( "); + + // We need to add the filter for within the same type. + if (orderByItem is not CosmosUndefined) + { + StringBuilder sb = new StringBuilder(); + CosmosElementToQueryLiteral cosmosElementToQueryLiteral = new CosmosElementToQueryLiteral(sb); + orderByItem.Accept(cosmosElementToQueryLiteral); + + string orderByItemToString = sb.ToString(); + + left.Append($"{expression} {(sortOrder == SortOrder.Descending ? Expressions.LessThan : Expressions.GreaterThan)} {orderByItemToString}"); + target.Append($"{expression} {(sortOrder == SortOrder.Descending ? Expressions.LessThanOrEqualTo : Expressions.GreaterThanOrEqualTo)} {orderByItemToString}"); + right.Append($"{expression} {(sortOrder == SortOrder.Descending ? Expressions.LessThanOrEqualTo : Expressions.GreaterThanOrEqualTo)} {orderByItemToString}"); + } + else + { + // User is ordering by undefined, so we need to avoid a null reference exception. + + // What we really want is to support expression > undefined, + // but the engine evaluates to undefined instead of true or false, + // so we work around this by using the IS_DEFINED() system function. + + ComparisionWithUndefinedFilters filters = new ComparisionWithUndefinedFilters(expression); + left.Append($"{(sortOrder == SortOrder.Descending ? filters.LessThan : filters.GreaterThan)}"); + target.Append($"{(sortOrder == SortOrder.Descending ? filters.LessThanOrEqualTo : filters.GreaterThanOrEqualTo)}"); + right.Append($"{(sortOrder == SortOrder.Descending ? filters.LessThanOrEqualTo : filters.GreaterThanOrEqualTo)}"); + } + + // Now we need to include all the types that match the sort order. 
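+ // For example, resuming an ascending ORDER BY c.x at the string "B" produces a left filter of roughly ( c.x > "B" OR IS_ARRAY(c.x) OR IS_OBJECT(c.x) ), since arrays and objects sort after strings.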
+ ReadOnlyMemory isDefinedFunctions = orderByItem.Accept(CosmosElementToIsSystemFunctionsVisitor.Singleton, sortOrder == SortOrder.Ascending); + foreach (string isDefinedFunction in isDefinedFunctions.Span) + { + AppendToBuilders(builders, " OR "); + AppendToBuilders(builders, $"{isDefinedFunction}({expression})"); + } + + AppendToBuilders(builders, " )"); + } + else + { + //For a multi order by query + // Suppose the query is SELECT* FROM c ORDER BY c.string ASC, c.number ASC + // And we left off on partition N with the value("A", 1) + // Then + // All the partitions to the left will have finished reading("A", 1) + // Partition N is still reading("A", 1) + // All the partitions to the right have let to read a "(A", 1) + // The filters are harder to derive since their are multiple columns + // But the problem reduces to "How do you know one document comes after another in a multi order by query" + // The answer is to just look at it one column at a time. + // For this particular scenario: + // If a first column is greater ex. ("B", blah), then the document comes later in the sort order + // Therefore we want all documents where the first column is greater than "A" which means > "A" + // Or if the first column is a tie, then you look at the second column ex. ("A", blah). + // Therefore we also want all documents where the first column was a tie but the second column is greater which means = "A" AND > 1 + // Therefore the filters should be + // (> "A") OR (= "A" AND > 1), (> "A") OR (= "A" AND >= 1), (> "A") OR (= "A" AND >= 1) + // Notice that if we repeated the same logic we for single order by we would have gotten + // > "A" AND > 1, >= "A" AND >= 1, >= "A" AND >= 1 + // which is wrong since we missed some documents + // Repeat the same logic for ASC, DESC + // (> "A") OR (= "A" AND < 1), (> "A") OR (= "A" AND <= 1), (> "A") OR (= "A" AND <= 1) + // Again for DESC, ASC + // (< "A") OR (= "A" AND > 1), (< "A") OR (= "A" AND >= 1), (< "A") OR (= "A" AND >= 1) + // And again for DESC DESC + // (< "A") OR (= "A" AND < 1), (< "A") OR (= "A" AND <= 1), (< "A") OR (= "A" AND <= 1) + // The general we look at all prefixes of the order by columns to look for tie breakers. + // Except for the full prefix whose last column follows the rules for single item order by + // And then you just OR all the possibilities together + for (int prefixLength = 1; prefixLength <= numOrderByItems; prefixLength++) + { + ReadOnlySpan<(OrderByColumn orderByColumn, CosmosElement orderByItem)> columnAndItemPrefix = columnAndItems.Span.Slice(start: 0, length: prefixLength); + + bool lastPrefix = prefixLength == numOrderByItems; + + AppendToBuilders(builders, "("); + + for (int index = 0; index < prefixLength; index++) + { + string expression = columnAndItemPrefix[index].orderByColumn.Expression; + SortOrder sortOrder = columnAndItemPrefix[index].orderByColumn.SortOrder; + CosmosElement orderByItem = columnAndItemPrefix[index].orderByItem; + bool lastItem = index == prefixLength - 1; + + AppendToBuilders(builders, "("); + + bool wasInequality; + // We need to add the filter for within the same type. 
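+ // (wasInequality below records whether this column emitted a strict comparison; only in that case is the IS_ type-widening appended further down.)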
+ if (orderByItem is CosmosUndefined) + { + ComparisionWithUndefinedFilters filters = new ComparisionWithUndefinedFilters(expression); + + // Refer to the logic from single order by for how we are handling order by undefined + if (lastItem) + { + if (lastPrefix) + { + if (sortOrder == SortOrder.Descending) + { + // <, <=, <= + AppendToBuilders(builders, filters.LessThan, filters.LessThanOrEqualTo, filters.LessThanOrEqualTo); + } + else + { + // >, >=, >= + AppendToBuilders(builders, filters.GreaterThan, filters.GreaterThanOrEqualTo, filters.GreaterThanOrEqualTo); + } + } + else + { + if (sortOrder == SortOrder.Descending) + { + // <, <, < + AppendToBuilders(builders, filters.LessThan, filters.LessThan, filters.LessThan); + } + else + { + // >, >, > + StreamingOrderByCrossPartitionQueryPipelineStage.AppendToBuilders(builders, filters.GreaterThan, filters.GreaterThan, filters.GreaterThan); + } + } + + wasInequality = true; + } + else + { + // =, =, = + AppendToBuilders(builders, filters.EqualTo); + wasInequality = false; + } + } + else + { + // Append Expression + AppendToBuilders(builders, expression); + AppendToBuilders(builders, " "); + + // Append Binary Operator + if (lastItem) + { + string inequality = sortOrder == SortOrder.Descending ? Expressions.LessThan : Expressions.GreaterThan; + AppendToBuilders(builders, inequality); + if (lastPrefix) + { + AppendToBuilders(builders, string.Empty, Expressions.EqualTo, Expressions.EqualTo); + } + + wasInequality = true; + } + else + { + AppendToBuilders(builders, Expressions.EqualTo); + wasInequality = false; + } + + // Append OrderBy Item + StringBuilder sb = new StringBuilder(); + CosmosElementToQueryLiteral cosmosElementToQueryLiteral = new CosmosElementToQueryLiteral(sb); + orderByItem.Accept(cosmosElementToQueryLiteral); + string orderByItemToString = sb.ToString(); + AppendToBuilders(builders, " "); + AppendToBuilders(builders, orderByItemToString); + AppendToBuilders(builders, " "); + } + + if (wasInequality) + { + // Now we need to include all the types that match the sort order. 
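+ // (Same type-widening as in the single ORDER BY case, applied per column expression.)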
+ ReadOnlyMemory isDefinedFunctions = orderByItem.Accept(CosmosElementToIsSystemFunctionsVisitor.Singleton, sortOrder == SortOrder.Ascending); + foreach (string isDefinedFunction in isDefinedFunctions.Span) + { + AppendToBuilders(builders, " OR "); + AppendToBuilders(builders, $"{isDefinedFunction}({expression}) "); + } + } + + AppendToBuilders(builders, ")"); + + if (!lastItem) + { + AppendToBuilders(builders, " AND "); + } + } + + AppendToBuilders(builders, ")"); + if (!lastPrefix) + { + AppendToBuilders(builders, " OR "); + } + } + } + + return (left.ToString(), target.ToString(), right.ToString()); + } + + private static OrderByContinuationToken CreateOrderByContinuationToken( + ParallelContinuationToken parallelToken, + OrderByQueryResult orderByQueryResult, + int skipCount, + string filter) + { + OrderByContinuationToken token; + // If order by items have c* types then it cannot be converted to resume values + if (ContainsSupportedResumeTypes(orderByQueryResult.OrderByItems)) + { + List resumeValues = new List(orderByQueryResult.OrderByItems.Count); + foreach (OrderByItem orderByItem in orderByQueryResult.OrderByItems) + { + resumeValues.Add(SqlQueryResumeValue.FromOrderByValue(orderByItem.Item)); + } + + token = new OrderByContinuationToken( + parallelToken, + orderByItems: null, + resumeValues, + orderByQueryResult.Rid, + skipCount: skipCount, + filter: filter); + } + else + { + token = new OrderByContinuationToken( + parallelToken, + orderByQueryResult.OrderByItems, + resumeValues: null, + orderByQueryResult.Rid, + skipCount: skipCount, + filter: filter); + } + + return token; + } + + // Helper method to check that resume values are of type that is supported by SqlQueryResumeValue + private static bool ContainsSupportedResumeTypes(IReadOnlyList orderByItems) + { + foreach (OrderByItem orderByItem in orderByItems) + { + if (!orderByItem.Item.Accept(SupportedResumeTypeVisitor.Singleton)) + { + return false; + } + } + + return true; + } + + private static async Task monadicQueryByPage)>> FilterNextAsync( + OrderByQueryPartitionRangePageAsyncEnumerator enumerator, + IReadOnlyList sortOrders, + OrderByContinuationToken continuationToken, + ITrace trace, + CancellationToken cancellationToken) + { + cancellationToken.ThrowIfCancellationRequested(); + // When we resume a query on a partition there is a possibility that we only read a partial page from the backend + // meaning that will we repeat some documents if we didn't do anything about it. + // The solution is to filter all the documents that come before in the sort order, since we have already emitted them to the client. + // The key is to seek until we get an order by value that matches the order by value we left off on. + // Once we do that we need to seek to the correct _rid within the term, + // since there might be many documents with the same order by value we left off on. + // Finally we need to skip some duplicate _rids, since JOINs emit multiples documents with the same rid and we read a partial page. 
+ // You can also think about this as a seek on a composite index where the columns are [sort_order, rid, skip_count] + + int itemsToSkip = continuationToken.SkipCount; + if (!ResourceId.TryParse(continuationToken.Rid, out ResourceId continuationRid)) + { + return TryCatch<(bool, int, TryCatch)>.FromException( + new MalformedContinuationTokenException( + $"Invalid Rid in the continuation token {continuationToken.ParallelContinuationToken.Token} for OrderBy~Context.")); + } + + if (!await enumerator.MoveNextAsync(trace, cancellationToken)) + { + return TryCatch<(bool, int, TryCatch)>.FromResult((true, 0, enumerator.Current)); + } + + TryCatch monadicOrderByQueryPage = enumerator.Current; + if (monadicOrderByQueryPage.Failed) + { + return TryCatch<(bool, int, TryCatch)>.FromException(monadicOrderByQueryPage.Exception); + } + + OrderByQueryPage orderByQueryPage = monadicOrderByQueryPage.Result; + IEnumerator documents = orderByQueryPage.Enumerator; + + while (documents.MoveNext()) + { + int sortOrderCompare = 0; + // Filter out documents until we find something that matches the sort order. + OrderByQueryResult orderByResult = new OrderByQueryResult(documents.Current); + for (int i = 0; (i < sortOrders.Count) && (sortOrderCompare == 0); ++i) + { + sortOrderCompare = continuationToken.ResumeValues != null + ? continuationToken.ResumeValues[i].CompareTo(orderByResult.OrderByItems[i].Item) + : ItemComparer.Instance.Compare( + continuationToken.OrderByItems[i].Item, + orderByResult.OrderByItems[i].Item); + + if (sortOrderCompare != 0) + { + sortOrderCompare = sortOrders[i] == SortOrder.Ascending ? sortOrderCompare : -sortOrderCompare; + } + } + + if (sortOrderCompare < 0) + { + // We might have passed the item due to deletions and filters. + return TryCatch<(bool, int, TryCatch)>.FromResult((true, 0, enumerator.Current)); + } + + if (sortOrderCompare > 0) + { + // This document does not match the sort order, so skip it. + continue; + } + + // Once the item matches the order by items from the continuation tokens + // We still need to remove all the documents that have a lower or same rid in the rid sort order. + // If there is a tie in the sort order the documents should be in _rid order in the same direction as the index (given by the backend) + ResourceId rid = ResourceId.Parse(orderByResult.Rid); + int ridOrderCompare = continuationRid.Document.CompareTo(rid.Document); + + Lazy cosmosQueryExecutionInfo = orderByQueryPage.Page.CosmosQueryExecutionInfo; + if ((cosmosQueryExecutionInfo == null) || cosmosQueryExecutionInfo.Value.ReverseRidEnabled) + { + // If reverse rid is enabled on the backend then fallback to the old way of doing it. + if (sortOrders[0] == SortOrder.Descending) + { + ridOrderCompare = -ridOrderCompare; + } + } + else + { + // Go by the whatever order the index wants + if (cosmosQueryExecutionInfo.Value.ReverseIndexScan) + { + ridOrderCompare = -ridOrderCompare; + } + } + + if (ridOrderCompare < 0) + { + // We might have passed the rid due to deletions and filters. + return TryCatch<(bool, int, TryCatch)>.FromResult((true, 0, enumerator.Current)); + } + + if (ridOrderCompare > 0) + { + // This document does not match the rid order, so skip it. 
+ continue; + } + + // At this point we need to skip due to joins + if (--itemsToSkip < 0) + { + return TryCatch<(bool, int, TryCatch)>.FromResult((true, 0, enumerator.Current)); + } + } + + // If we made it here it means we failed to find the resume order by item which is possible + // if the user added documents inbetween continuations, so we need to yield and filter the next page of results also. + return TryCatch<(bool, int, TryCatch)>.FromResult((false, itemsToSkip, enumerator.Current)); + } + + private sealed class CosmosElementToIsSystemFunctionsVisitor : ICosmosElementVisitor> + { + public static readonly CosmosElementToIsSystemFunctionsVisitor Singleton = new CosmosElementToIsSystemFunctionsVisitor(); + + private static class IsSystemFunctions + { + public const string Defined = "IS_DEFINED"; + public const string Undefined = "NOT IS_DEFINED"; + public const string Null = "IS_NULL"; + public const string Boolean = "IS_BOOLEAN"; + public const string Number = "IS_NUMBER"; + public const string String = "IS_STRING"; + public const string Array = "IS_ARRAY"; + public const string Object = "IS_OBJECT"; + } + + private static readonly ReadOnlyMemory SystemFunctionSortOrder = new string[] + { + IsSystemFunctions.Undefined, + IsSystemFunctions.Null, + IsSystemFunctions.Boolean, + IsSystemFunctions.Number, + IsSystemFunctions.String, + IsSystemFunctions.Array, + IsSystemFunctions.Object, + }; + + private static readonly ReadOnlyMemory ExtendedTypesSystemFunctionSortOrder = new string[] + { + IsSystemFunctions.Undefined, + IsSystemFunctions.Defined + }; + + private static class SortOrder + { + public const int Undefined = 0; + public const int Null = 1; + public const int Boolean = 2; + public const int Number = 3; + public const int String = 4; + public const int Array = 5; + public const int Object = 6; + } + + private static class ExtendedTypesSortOrder + { + public const int Undefined = 0; + public const int Defined = 1; + } + + private CosmosElementToIsSystemFunctionsVisitor() + { + } + + public ReadOnlyMemory Visit(CosmosArray cosmosArray, bool isAscending) + { + return GetIsDefinedFunctions(SortOrder.Array, isAscending); + } + + public ReadOnlyMemory Visit(CosmosBinary cosmosBinary, bool isAscending) + { + return GetExtendedTypesIsDefinedFunctions(ExtendedTypesSortOrder.Defined, isAscending); + } + + public ReadOnlyMemory Visit(CosmosBoolean cosmosBoolean, bool isAscending) + { + return GetIsDefinedFunctions(SortOrder.Boolean, isAscending); + } + + public ReadOnlyMemory Visit(CosmosGuid cosmosGuid, bool isAscending) + { + return GetExtendedTypesIsDefinedFunctions(ExtendedTypesSortOrder.Defined, isAscending); + } + + public ReadOnlyMemory Visit(CosmosNull cosmosNull, bool isAscending) + { + return GetIsDefinedFunctions(SortOrder.Null, isAscending); + } + + public ReadOnlyMemory Visit(CosmosUndefined cosmosUndefined, bool isAscending) + { + return isAscending ? SystemFunctionSortOrder.Slice(start: 1) : ReadOnlyMemory.Empty; + } + + public ReadOnlyMemory Visit(CosmosNumber cosmosNumber, bool isAscending) + { + return GetIsDefinedFunctions(SortOrder.Number, isAscending); + } + + public ReadOnlyMemory Visit(CosmosObject cosmosObject, bool isAscending) + { + return GetIsDefinedFunctions(SortOrder.Object, isAscending); + } + + public ReadOnlyMemory Visit(CosmosString cosmosString, bool isAscending) + { + return GetIsDefinedFunctions(SortOrder.String, isAscending); + } + + private static ReadOnlyMemory GetIsDefinedFunctions(int index, bool isAscending) + { + return isAscending ? 
SystemFunctionSortOrder.Slice(index + 1) : SystemFunctionSortOrder.Slice(start: 0, index); + } + + private static ReadOnlyMemory GetExtendedTypesIsDefinedFunctions(int index, bool isAscending) + { + return isAscending ? + ExtendedTypesSystemFunctionSortOrder.Slice(index + 1) : + ExtendedTypesSystemFunctionSortOrder.Slice(start: 0, index); + } + } + + private readonly struct ComparisionWithUndefinedFilters + { + public ComparisionWithUndefinedFilters( + string expression) + { + this.LessThan = "false"; + this.LessThanOrEqualTo = $"NOT IS_DEFINED({expression})"; + this.EqualTo = $"NOT IS_DEFINED({expression})"; + this.GreaterThan = $"IS_DEFINED({expression})"; + this.GreaterThanOrEqualTo = "true"; + } + + public string LessThan { get; } + public string LessThanOrEqualTo { get; } + public string EqualTo { get; } + public string GreaterThan { get; } + public string GreaterThanOrEqualTo { get; } + } + + private sealed class SupportedResumeTypeVisitor : ICosmosElementVisitor + { + public static readonly SupportedResumeTypeVisitor Singleton = new SupportedResumeTypeVisitor(); + + private SupportedResumeTypeVisitor() + { + } + + public bool Visit(CosmosArray cosmosArray) + { + return true; + } + + public bool Visit(CosmosBinary cosmosBinary) + { + return false; + } + + public bool Visit(CosmosBoolean cosmosBoolean) + { + return true; + } + + public bool Visit(CosmosGuid cosmosGuid) + { + return false; + } + + public bool Visit(CosmosNull cosmosNull) + { + return true; + } + + public bool Visit(CosmosNumber cosmosNumber) + { + return cosmosNumber.Accept(SqlQueryResumeValue.SupportedResumeNumberTypeVisitor.Singleton); + } + + public bool Visit(CosmosObject cosmosObject) + { + return true; + } + + public bool Visit(CosmosString cosmosString) + { + return true; + } + + public bool Visit(CosmosUndefined cosmosUndefined) + { + return true; + } + } + } + + private sealed class NonStreamingOrderByPipelineStage : IQueryPipelineStage + { + private const int FlatHeapSizeLimit = 4096; + + private const int MaximumPageSize = 2048; + + private const string DisallowContinuationTokenMessage = "Continuation tokens are not supported for the non streaming order by pipeline."; + + private static readonly QueryState NonStreamingOrderByInProgress = new QueryState(CosmosString.Create("NonStreamingOrderByInProgress")); + + private readonly int pageSize; + + private readonly double totalRequestCharge; + + private readonly string activityId; + + private readonly Lazy cosmosQueryExecutionInfo; + + private readonly DistributionPlanSpec distributionPlanSpec; + + private readonly IReadOnlyDictionary additionalHeaders; + + private readonly IEnumerator enumerator; + + private int totalBufferedResultCount; + + private bool firstPage; + + public TryCatch Current { get; private set; } + + private NonStreamingOrderByPipelineStage( + int pageSize, + double totalRequestCharge, + string activityId, + Lazy cosmosQueryExecutionInfo, + DistributionPlanSpec distributionPlanSpec, + IReadOnlyDictionary additionalHeaders, + IEnumerator enumerator, + int totalBufferedResultCount) + { + this.pageSize = pageSize; + this.totalRequestCharge = totalRequestCharge; + this.activityId = activityId ?? throw new ArgumentNullException(nameof(activityId)); + this.cosmosQueryExecutionInfo = cosmosQueryExecutionInfo; + this.distributionPlanSpec = distributionPlanSpec; + this.additionalHeaders = additionalHeaders; + this.firstPage = true; + this.enumerator = enumerator ?? 
throw new ArgumentNullException(nameof(enumerator)); + this.totalBufferedResultCount = totalBufferedResultCount; + } + + public ValueTask DisposeAsync() + { + this.enumerator.Dispose(); + return default; + } + + public ValueTask MoveNextAsync(ITrace trace, CancellationToken cancellationToken) + { + cancellationToken.ThrowIfCancellationRequested(); + + List documents = this.totalBufferedResultCount >= this.pageSize ? new List(this.pageSize) : new List(); + for (int count = 0; count < this.pageSize && this.enumerator.MoveNext(); ++count) + { + documents.Add(this.enumerator.Current.Payload); + } + + this.totalBufferedResultCount -= documents.Count; + + if (this.firstPage || documents.Count > 0) + { + double requestCharge = this.firstPage ? this.totalRequestCharge : 0; + QueryPage queryPage = new QueryPage( + documents: documents, + requestCharge: requestCharge, + activityId: this.activityId, + cosmosQueryExecutionInfo: this.cosmosQueryExecutionInfo, + distributionPlanSpec: this.distributionPlanSpec, + disallowContinuationTokenMessage: DisallowContinuationTokenMessage, + additionalHeaders: this.additionalHeaders, + state: documents.Count > 0 ? NonStreamingOrderByInProgress : null, + streaming: false); + + this.firstPage = false; + this.Current = TryCatch.FromResult(queryPage); + return new ValueTask(true); + } + else + { + return new ValueTask(false); + } + } + + public static async Task CreateAsync( + QueryPaginationOptions queryPaginationOptions, + IReadOnlyList sortOrders, + IEnumerable enumerators, + QueryPageParameters queryPageParameters, + ITrace trace, + CancellationToken cancellationToken) + { + int pageSize = queryPaginationOptions.PageSizeLimit.GetValueOrDefault(MaximumPageSize) > 0 ? + Math.Min(MaximumPageSize, queryPaginationOptions.PageSizeLimit.Value) : + MaximumPageSize; + + int totalBufferedResultCount = 0; + foreach (OrderByQueryPartitionRangePageAsyncEnumerator enumerator in enumerators) + { + totalBufferedResultCount += enumerator.BufferedResultCount; + } + + OrderByQueryResultComparer comparer = new OrderByQueryResultComparer(sortOrders); + (IEnumerator orderbyQueryResultEnumerator, double totalRequestCharge) = await OrderByCrossPartitionEnumerator.CreateAsync( + enumerators, + comparer, + FlatHeapSizeLimit, + trace, + cancellationToken); + + return new NonStreamingOrderByPipelineStage( + pageSize, + totalRequestCharge, + queryPageParameters.ActivityId, + queryPageParameters.CosmosQueryExecutionInfo, + queryPageParameters.DistributionPlanSpec, + queryPageParameters.AdditionalHeaders, + orderbyQueryResultEnumerator, + totalBufferedResultCount); + } + } + } +} diff --git a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/CrossPartition/OrderBy/OrderByEnumeratorComparer.cs b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/CrossPartition/OrderBy/OrderByEnumeratorComparer.cs index 5b9858154a..4c3acd6eaa 100644 --- a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/CrossPartition/OrderBy/OrderByEnumeratorComparer.cs +++ b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/CrossPartition/OrderBy/OrderByEnumeratorComparer.cs @@ -4,9 +4,7 @@ namespace Microsoft.Azure.Cosmos.Query.Core.Pipeline.CrossPartition.OrderBy { - using System; using System.Collections.Generic; - using System.Diagnostics; /// /// For cross partition order by queries we serve documents from the partition @@ -14,12 +12,8 @@ namespace Microsoft.Azure.Cosmos.Query.Core.Pipeline.CrossPartition.OrderBy /// If there is a tie, then we break the tie by picking the leftmost partition. 
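As a quick illustration of the summary above, a minimal editorial sketch (hypothetical types, integer order-by items) of the two-level comparison: the ORDER BY items decide first, and only a tie falls back to the leftmost feed range:

using System.Collections.Generic;

record RangeHead(IReadOnlyList<int> OrderByItems, string RangeMin);

sealed class RangeHeadComparer : IComparer<RangeHead>
{
    public int Compare(RangeHead x, RangeHead y)
    {
        // First compare the current documents by their order-by items.
        for (int i = 0; i < x.OrderByItems.Count; ++i)
        {
            int cmp = x.OrderByItems[i].CompareTo(y.OrderByItems[i]);
            if (cmp != 0)
            {
                return cmp;
            }
        }

        // Tie: pick the enumerator whose EPK range starts furthest to the left.
        return string.CompareOrdinal(x.RangeMin, y.RangeMin);
    }
}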
/// internal sealed class OrderByEnumeratorComparer : IComparer - { - /// - /// The sort orders for the query (1 for each order by in the query). - /// Until composite indexing is released this will just be an array of length 1. - /// - private readonly IReadOnlyList sortOrders; + { + private readonly OrderByQueryResultComparer comparer; /// /// Initializes a new instance of the OrderByConsumeComparer class. @@ -27,17 +21,7 @@ internal sealed class OrderByEnumeratorComparer : IComparerThe sort orders for the query. public OrderByEnumeratorComparer(IReadOnlyList sortOrders) { - if (sortOrders == null) - { - throw new ArgumentNullException("Sort Orders array can not be null for an order by comparer."); - } - - if (sortOrders.Count == 0) - { - throw new ArgumentException("Sort Orders array can not be empty for an order by comparer."); - } - - this.sortOrders = new List(sortOrders); + this.comparer = new OrderByQueryResultComparer(sortOrders); } /// @@ -78,7 +62,7 @@ public int Compare( OrderByQueryResult result2 = new OrderByQueryResult(enumerator2.Current.Result.Enumerator.Current); // First compare the documents based on the sort order of the query. - int cmp = this.CompareOrderByItems(result1.OrderByItems, result2.OrderByItems); + int cmp = this.comparer.Compare(result1, result2); if (cmp != 0) { // If there is no tie just return that. @@ -88,56 +72,5 @@ public int Compare( // If there is a tie, then break the tie by picking the one from the left most partition. return string.CompareOrdinal(((FeedRangeEpk)enumerator1.FeedRangeState.FeedRange).Range.Min, ((FeedRangeEpk)enumerator2.FeedRangeState.FeedRange).Range.Min); } - - /// - /// Takes the items relevant to the sort and return an integer defining the relationship. - /// - /// The items relevant to the sort from the first partition. - /// The items relevant to the sort from the second partition. - /// The sort relationship. - /// - /// Suppose the query was "SELECT * FROM c ORDER BY c.name asc, c.age desc", - /// then items1 could be ["Brandon", 22] and items2 could be ["Felix", 28] - /// Then we would first compare "Brandon" to "Felix" and say that "Brandon" comes first in an ascending lex order (we don't even have to look at age). - /// If items1 was ["Brandon", 22] and items2 was ["Brandon", 23] then we would say have to look at the age to break the tie and in this case 23 comes first in a descending order. - /// Some examples of composite order by: http://www.dofactory.com/sql/order-by - /// - public int CompareOrderByItems(IReadOnlyList items1, IReadOnlyList items2) - { - if (object.ReferenceEquals(items1, items2)) - { - return 0; - } - - Debug.Assert( - items1 != null && items2 != null, - "Order-by items must be present."); - - Debug.Assert( - items1.Count == items2.Count, - "OrderByResult instances should have the same number of order-by items."); - - Debug.Assert( - items1.Count > 0, - "OrderByResult instances should have at least 1 order-by item."); - - Debug.Assert( - this.sortOrders.Count == items1.Count, - "SortOrders must match size of order-by items."); - - for (int i = 0; i < this.sortOrders.Count; ++i) - { - int cmp = ItemComparer.Instance.Compare( - items1[i].Item, - items2[i].Item); - - if (cmp != 0) - { - return this.sortOrders[i] != SortOrder.Descending ? 
cmp : -cmp; - } - } - - return 0; - } } } diff --git a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/CrossPartition/OrderBy/OrderByQueryPage.cs b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/CrossPartition/OrderBy/OrderByQueryPage.cs index 5a482ba6e6..9bb2b3c3b5 100644 --- a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/CrossPartition/OrderBy/OrderByQueryPage.cs +++ b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/CrossPartition/OrderBy/OrderByQueryPage.cs @@ -28,8 +28,10 @@ public OrderByQueryPage(QueryPage queryPage) public QueryPage Page { get; } - public IEnumerator Enumerator { get; } - + public IEnumerator Enumerator { get; } + + public override int ItemCount => this.Page.ItemCount; + protected override ImmutableHashSet DerivedClassBannedHeaders => bannedHeaders; } } diff --git a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/CrossPartition/OrderBy/OrderByQueryPartitionRangePageAsyncEnumerator.cs b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/CrossPartition/OrderBy/OrderByQueryPartitionRangePageAsyncEnumerator.cs index bf5bb863fe..49aedfc912 100644 --- a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/CrossPartition/OrderBy/OrderByQueryPartitionRangePageAsyncEnumerator.cs +++ b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/CrossPartition/OrderBy/OrderByQueryPartitionRangePageAsyncEnumerator.cs @@ -4,7 +4,8 @@ namespace Microsoft.Azure.Cosmos.Query.Core.Pipeline.CrossPartition.OrderBy { - using System; + using System; + using System.Collections.Generic; using System.Threading; using System.Threading.Tasks; using Microsoft.Azure.Cosmos.Pagination; @@ -15,51 +16,93 @@ namespace Microsoft.Azure.Cosmos.Query.Core.Pipeline.CrossPartition.OrderBy internal sealed class OrderByQueryPartitionRangePageAsyncEnumerator : PartitionRangePageAsyncEnumerator, IPrefetcher { private readonly InnerEnumerator innerEnumerator; - private readonly BufferedPartitionRangePageAsyncEnumerator bufferedEnumerator; - - public OrderByQueryPartitionRangePageAsyncEnumerator( + private readonly BufferedPartitionRangePageAsyncEnumeratorBase bufferedEnumerator; + + public static OrderByQueryPartitionRangePageAsyncEnumerator Create( IQueryDataSource queryDataSource, SqlQuerySpec sqlQuerySpec, FeedRangeState feedRangeState, PartitionKey? 
partitionKey, QueryPaginationOptions queryPaginationOptions, - string filter, - CancellationToken cancellationToken) - : base(feedRangeState, cancellationToken) - { - this.StartOfPageState = feedRangeState.State; - this.innerEnumerator = new InnerEnumerator( + string filter, + PrefetchPolicy prefetchPolicy) + { + InnerEnumerator enumerator = new InnerEnumerator( queryDataSource, sqlQuerySpec, feedRangeState, partitionKey, queryPaginationOptions, - filter, - cancellationToken); - this.bufferedEnumerator = new BufferedPartitionRangePageAsyncEnumerator( - this.innerEnumerator, - cancellationToken); - } - + filter); + + BufferedPartitionRangePageAsyncEnumeratorBase bufferedEnumerator = prefetchPolicy switch + { + PrefetchPolicy.PrefetchSinglePage => new BufferedPartitionRangePageAsyncEnumerator(enumerator), + PrefetchPolicy.PrefetchAll => new FullyBufferedPartitionRangeAsyncEnumerator(enumerator), + _ => throw new ArgumentOutOfRangeException(nameof(prefetchPolicy)), + }; + + return new OrderByQueryPartitionRangePageAsyncEnumerator(enumerator, bufferedEnumerator, feedRangeState); + } + + private OrderByQueryPartitionRangePageAsyncEnumerator( + InnerEnumerator innerEnumerator, + BufferedPartitionRangePageAsyncEnumeratorBase bufferedEnumerator, + FeedRangeState feedRangeState) + : base(feedRangeState) + { + this.innerEnumerator = innerEnumerator ?? throw new ArgumentNullException(nameof(innerEnumerator)); + this.bufferedEnumerator = bufferedEnumerator ?? throw new ArgumentNullException(nameof(bufferedEnumerator)); + this.StartOfPageState = feedRangeState.State; + } + public SqlQuerySpec SqlQuerySpec => this.innerEnumerator.SqlQuerySpec; public QueryPaginationOptions QueryPaginationOptions => this.innerEnumerator.QueryPaginationOptions; public string Filter => this.innerEnumerator.Filter; - public QueryState StartOfPageState { get; private set; } + public QueryState StartOfPageState { get; private set; } + + public int BufferedResultCount => this.bufferedEnumerator.BufferedItemCount; - public override ValueTask DisposeAsync() => default; + public override ValueTask DisposeAsync() + { + // the innerEnumerator is passed to the bufferedEnumerator + return this.bufferedEnumerator.DisposeAsync(); + } protected override async Task> GetNextPageAsync(ITrace trace, CancellationToken cancellationToken) { this.StartOfPageState = this.FeedRangeState.State; - await this.bufferedEnumerator.MoveNextAsync(trace); + await this.bufferedEnumerator.MoveNextAsync(trace, cancellationToken); return this.bufferedEnumerator.Current; } - public ValueTask PrefetchAsync(ITrace trace, CancellationToken cancellationToken) => this.bufferedEnumerator.PrefetchAsync(trace, cancellationToken); - + public ValueTask PrefetchAsync(ITrace trace, CancellationToken cancellationToken) + { + return this.bufferedEnumerator.PrefetchAsync(trace, cancellationToken); + } + + public OrderByQueryPartitionRangePageAsyncEnumerator CloneAsFullyBufferedEnumerator() + { + if (this.Current.Failed) + { + throw new InvalidOperationException($"{nameof(CloneAsFullyBufferedEnumerator)} is valid only if the enumerator has not failed"); + } + + InnerEnumerator innerEnumerator = this.innerEnumerator.CloneWithMaxPageSize(); + + FullyBufferedPartitionRangeAsyncEnumerator bufferedEnumerator = new FullyBufferedPartitionRangeAsyncEnumerator( + innerEnumerator, + new List { this.Current.Result }); + + return new OrderByQueryPartitionRangePageAsyncEnumerator( + innerEnumerator, + bufferedEnumerator, + this.FeedRangeState); + } + private sealed class InnerEnumerator : 
PartitionRangePageAsyncEnumerator { private readonly IQueryDataSource queryDataSource; @@ -70,9 +113,8 @@ public InnerEnumerator( FeedRangeState feedRangeState, PartitionKey? partitionKey, QueryPaginationOptions queryPaginationOptions, - string filter, - CancellationToken cancellationToken) - : base(feedRangeState, cancellationToken) + string filter) + : base(feedRangeState) { this.queryDataSource = queryDataSource ?? throw new ArgumentNullException(nameof(queryDataSource)); this.SqlQuerySpec = sqlQuerySpec ?? throw new ArgumentNullException(nameof(sqlQuerySpec)); @@ -87,8 +129,24 @@ public InnerEnumerator( public QueryPaginationOptions QueryPaginationOptions { get; } - public string Filter { get; } - + public string Filter { get; } + + public InnerEnumerator CloneWithMaxPageSize() + { + QueryPaginationOptions options = new QueryPaginationOptions( + pageSizeHint: int.MaxValue, + optimisticDirectExecute: this.QueryPaginationOptions.OptimisticDirectExecute, + additionalHeaders: this.QueryPaginationOptions.AdditionalHeaders); + + return new InnerEnumerator( + this.queryDataSource, + this.SqlQuerySpec, + this.FeedRangeState, + this.PartitionKey, + options, + this.Filter); + } + public override ValueTask DisposeAsync() => default; protected override async Task> GetNextPageAsync(ITrace trace, CancellationToken cancellationToken) diff --git a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/CrossPartition/OrderBy/OrderByQueryResult.cs b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/CrossPartition/OrderBy/OrderByQueryResult.cs index e3f46bd848..ed43b01794 100644 --- a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/CrossPartition/OrderBy/OrderByQueryResult.cs +++ b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/CrossPartition/OrderBy/OrderByQueryResult.cs @@ -25,8 +25,10 @@ namespace Microsoft.Azure.Cosmos.Query.Core.Pipeline.CrossPartition.OrderBy /// /// internal readonly struct OrderByQueryResult - { - private readonly CosmosObject cosmosObject; + { + private readonly CosmosObject cosmosObject; + + private readonly IReadOnlyList orderByItems; public OrderByQueryResult(CosmosElement cosmosElement) { @@ -40,7 +42,8 @@ public OrderByQueryResult(CosmosElement cosmosElement) throw new ArgumentException($"{nameof(cosmosElement)} must not be an object."); } - this.cosmosObject = cosmosObject; + this.cosmosObject = cosmosObject; + this.orderByItems = GetOrderByItems(cosmosObject); } /// @@ -51,17 +54,17 @@ public string Rid get { // cassandra row uses __sys_rid as opposed to _rid - if (!this.cosmosObject.TryGetValue("_rid", out CosmosElement cosmosElement)) + if (!this.cosmosObject.TryGetValue(FieldNames.Rid, out CosmosElement cosmosElement)) { - if (!this.cosmosObject.TryGetValue("__sys_rid", out cosmosElement)) + if (!this.cosmosObject.TryGetValue(FieldNames.CassandraRid, out cosmosElement)) { - throw new InvalidOperationException($"Underlying object does not have an '_rid' or '__sys_rid' field."); + throw new InvalidOperationException($"Underlying object does not have an '{FieldNames.Rid}' or '{FieldNames.CassandraRid}' field."); } } if (!(cosmosElement is CosmosString cosmosString)) { - throw new InvalidOperationException($"'_rid' or '__sys_rid' field was not a string."); + throw new InvalidOperationException($"'{FieldNames.Rid}' or ' {FieldNames.CassandraRid} ' field.g."); } return cosmosString.Value; @@ -71,29 +74,7 @@ public string Rid /// /// Gets the order by items from the document. 
/// - public IReadOnlyList OrderByItems - { - get - { - if (!this.cosmosObject.TryGetValue("orderByItems", out CosmosElement cosmosElement)) - { - throw new InvalidOperationException($"Underlying object does not have an 'orderByItems' field."); - } - - if (!(cosmosElement is CosmosArray cosmosArray)) - { - throw new InvalidOperationException($"orderByItems field was not an array."); - } - - List orderByItems = new List(cosmosArray.Count); - foreach (CosmosElement orderByItem in cosmosArray) - { - orderByItems.Add(new OrderByItem(orderByItem)); - } - - return orderByItems; - } - } + public IReadOnlyList OrderByItems => this.orderByItems; /// /// Gets the actual document. @@ -102,13 +83,45 @@ public CosmosElement Payload { get { - if (!this.cosmosObject.TryGetValue("payload", out CosmosElement cosmosElement)) + if (!this.cosmosObject.TryGetValue(FieldNames.Payload, out CosmosElement cosmosElement)) { return CosmosUndefined.Create(); } return cosmosElement; } + } + + private static IReadOnlyList GetOrderByItems(CosmosObject cosmosObject) + { + if (!cosmosObject.TryGetValue(FieldNames.OrderByItems, out CosmosElement cosmosElement)) + { + throw new InvalidOperationException($"Underlying object does not have an 'orderByItems' field."); + } + + if (!(cosmosElement is CosmosArray cosmosArray)) + { + throw new InvalidOperationException($"orderByItems field was not an array."); + } + + List orderByItems = new List(cosmosArray.Count); + foreach (CosmosElement orderByItem in cosmosArray) + { + orderByItems.Add(new OrderByItem(orderByItem)); + } + + return orderByItems; + } + + private static class FieldNames + { + public const string OrderByItems = "orderByItems"; + + public const string Payload = "payload"; + + public const string Rid = "_rid"; + + public const string CassandraRid = "__sys_rid"; } } } diff --git a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/CrossPartition/OrderBy/OrderByQueryResultComparer.cs b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/CrossPartition/OrderBy/OrderByQueryResultComparer.cs new file mode 100644 index 0000000000..929ccc5e7b --- /dev/null +++ b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/CrossPartition/OrderBy/OrderByQueryResultComparer.cs @@ -0,0 +1,85 @@ +// ------------------------------------------------------------ +// Copyright (c) Microsoft Corporation. All rights reserved. +// ------------------------------------------------------------ + +namespace Microsoft.Azure.Cosmos.Query.Core.Pipeline.CrossPartition.OrderBy +{ + using System.Collections.Generic; + using System.Diagnostics; + + internal sealed class OrderByQueryResultComparer : IComparer + { + private readonly IReadOnlyList sortOrders; + + public OrderByQueryResultComparer(IReadOnlyList sortOrders) + { + if (sortOrders == null) + { + throw new System.ArgumentNullException("Sort Orders array can not be null for an order by comparer."); + } + + if (sortOrders.Count == 0) + { + throw new System.ArgumentException("Sort Orders array can not be empty for an order by comparer."); + } + + this.sortOrders = sortOrders; + } + + public int Compare(OrderByQueryResult x, OrderByQueryResult y) + { + return this.CompareOrderByItems(x.OrderByItems, y.OrderByItems); + } + + /// + /// Takes the items relevant to the sort and return an integer defining the relationship. + /// + /// The items relevant to the sort from the first partition. + /// The items relevant to the sort from the second partition. + /// The sort relationship. 
+ /// + /// Suppose the query was "SELECT * FROM c ORDER BY c.name asc, c.age desc", + /// then items1 could be ["Brandon", 22] and items2 could be ["Felix", 28] + /// Then we would first compare "Brandon" to "Felix" and say that "Brandon" comes first in an ascending lex order (we don't even have to look at age). + /// If items1 was ["Brandon", 22] and items2 was ["Brandon", 23] then we would say have to look at the age to break the tie and in this case 23 comes first in a descending order. + /// Some examples of composite order by: http://www.dofactory.com/sql/order-by + /// + private int CompareOrderByItems(IReadOnlyList items1, IReadOnlyList items2) + { + if (object.ReferenceEquals(items1, items2)) + { + return 0; + } + + Debug.Assert( + items1 != null && items2 != null, + "Order-by items must be present."); + + Debug.Assert( + items1.Count == items2.Count, + "OrderByResult instances should have the same number of order-by items."); + + Debug.Assert( + items1.Count > 0, + "OrderByResult instances should have at least 1 order-by item."); + + Debug.Assert( + this.sortOrders.Count == items1.Count, + "SortOrders must match size of order-by items."); + + for (int i = 0; i < this.sortOrders.Count; ++i) + { + int cmp = ItemComparer.Instance.Compare( + items1[i].Item, + items2[i].Item); + + if (cmp != 0) + { + return this.sortOrders[i] != SortOrder.Descending ? cmp : -cmp; + } + } + + return 0; + } + } +} diff --git a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/CrossPartition/Parallel/ParallelCrossPartitionQueryPipelineStage.cs b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/CrossPartition/Parallel/ParallelCrossPartitionQueryPipelineStage.cs index b84a36870e..d9b564b5f3 100644 --- a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/CrossPartition/Parallel/ParallelCrossPartitionQueryPipelineStage.cs +++ b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/CrossPartition/Parallel/ParallelCrossPartitionQueryPipelineStage.cs @@ -27,14 +27,11 @@ namespace Microsoft.Azure.Cosmos.Query.Core.Pipeline.CrossPartition.Parallel internal sealed class ParallelCrossPartitionQueryPipelineStage : IQueryPipelineStage { private readonly CrossPartitionRangePageAsyncEnumerator crossPartitionRangePageAsyncEnumerator; - private CancellationToken cancellationToken; private ParallelCrossPartitionQueryPipelineStage( - CrossPartitionRangePageAsyncEnumerator crossPartitionRangePageAsyncEnumerator, - CancellationToken cancellationToken) + CrossPartitionRangePageAsyncEnumerator crossPartitionRangePageAsyncEnumerator) { this.crossPartitionRangePageAsyncEnumerator = crossPartitionRangePageAsyncEnumerator ?? throw new ArgumentNullException(nameof(crossPartitionRangePageAsyncEnumerator)); - this.cancellationToken = cancellationToken; } public TryCatch Current { get; private set; } @@ -48,14 +45,14 @@ public ValueTask DisposeAsync() // 1) We fully drain from the left most partition before moving on to the next partition // 2) We drain only full pages from the document producer so we aren't left with a partial page // otherwise we would need to add to the continuation token how many items to skip over on that page. 
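A minimal editorial sketch (hypothetical page-source interface, greatly simplified) of the drain pattern those two invariants imply, written against the new shape in which the CancellationToken is passed to each MoveNextAsync call instead of being stored on the stage:

using System.Collections.Generic;
using System.Threading;
using System.Threading.Tasks;

interface IPageSource<TPage>
{
    ValueTask<bool> MoveNextAsync(CancellationToken cancellationToken);
    TPage Current { get; }
}

static class DrainSketch
{
    public static async IAsyncEnumerable<TPage> DrainLeftToRightAsync<TPage>(
        IReadOnlyList<IPageSource<TPage>> rangesLeftToRight,
        [System.Runtime.CompilerServices.EnumeratorCancellation] CancellationToken cancellationToken = default)
    {
        foreach (IPageSource<TPage> range in rangesLeftToRight)
        {
            // Invariant 1: fully drain this range before touching the next one.
            // Invariant 2: only whole pages are yielded, so the continuation token
            // never needs an "items to skip within a page" component.
            while (await range.MoveNextAsync(cancellationToken))
            {
                yield return range.Current;
            }
        }
    }
}

Because only whole pages are surfaced, the per-range continuation state composed in MonadicCreate never has to encode a partial-page offset.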
- public async ValueTask MoveNextAsync(ITrace trace) + public async ValueTask MoveNextAsync(ITrace trace, CancellationToken cancellationToken) { if (trace == null) { throw new ArgumentNullException(nameof(trace)); } - if (!await this.crossPartitionRangePageAsyncEnumerator.MoveNextAsync(trace)) + if (!await this.crossPartitionRangePageAsyncEnumerator.MoveNextAsync(trace, cancellationToken)) { this.Current = default; return false; @@ -97,7 +94,7 @@ public async ValueTask MoveNextAsync(ITrace trace) foreach (FeedRangeState feedRangeState in feedRangeStates.Skip(1)) { - this.cancellationToken.ThrowIfCancellationRequested(); + cancellationToken.ThrowIfCancellationRequested(); if (feedRangeState.State != null) { @@ -120,12 +117,12 @@ public async ValueTask MoveNextAsync(ITrace trace) backendQueryPage.Documents, backendQueryPage.RequestCharge, backendQueryPage.ActivityId, - backendQueryPage.ResponseLengthInBytes, backendQueryPage.CosmosQueryExecutionInfo, distributionPlanSpec: default, backendQueryPage.DisallowContinuationTokenMessage, backendQueryPage.AdditionalHeaders, - queryState); + queryState, + backendQueryPage.Streaming); this.Current = TryCatch.FromResult(crossPartitionQueryPage); return true; @@ -139,8 +136,7 @@ public static TryCatch MonadicCreate( QueryPaginationOptions queryPaginationOptions, int maxConcurrency, PrefetchPolicy prefetchPolicy, - CosmosElement continuationToken, - CancellationToken cancellationToken) + CosmosElement continuationToken) { if (targetRanges == null) { @@ -162,14 +158,13 @@ public static TryCatch MonadicCreate( CrossPartitionRangePageAsyncEnumerator crossPartitionPageEnumerator = new CrossPartitionRangePageAsyncEnumerator( feedRangeProvider: documentContainer, - createPartitionRangeEnumerator: ParallelCrossPartitionQueryPipelineStage.MakeCreateFunction(documentContainer, sqlQuerySpec, queryPaginationOptions, partitionKey, cancellationToken), + createPartitionRangeEnumerator: ParallelCrossPartitionQueryPipelineStage.MakeCreateFunction(documentContainer, sqlQuerySpec, queryPaginationOptions, partitionKey), comparer: Comparer.Singleton, maxConcurrency: maxConcurrency, prefetchPolicy: prefetchPolicy, - state: state, - cancellationToken: cancellationToken); + state: state); - ParallelCrossPartitionQueryPipelineStage stage = new ParallelCrossPartitionQueryPipelineStage(crossPartitionPageEnumerator, cancellationToken); + ParallelCrossPartitionQueryPipelineStage stage = new ParallelCrossPartitionQueryPipelineStage(crossPartitionPageEnumerator); return TryCatch.FromResult(stage); } @@ -248,20 +243,12 @@ private static CreatePartitionRangePageAsyncEnumerator Ma IQueryDataSource queryDataSource, SqlQuerySpec sqlQuerySpec, QueryPaginationOptions queryPaginationOptions, - Cosmos.PartitionKey? partitionKey, - CancellationToken cancellationToken) => (FeedRangeState feedRangeState) => new QueryPartitionRangePageAsyncEnumerator( + Cosmos.PartitionKey? 
partitionKey) => (FeedRangeState feedRangeState) => new QueryPartitionRangePageAsyncEnumerator( queryDataSource, sqlQuerySpec, feedRangeState, partitionKey, - queryPaginationOptions, - cancellationToken); - - public void SetCancellationToken(CancellationToken cancellationToken) - { - this.cancellationToken = cancellationToken; - this.crossPartitionRangePageAsyncEnumerator.SetCancellationToken(cancellationToken); - } + queryPaginationOptions); private sealed class Comparer : IComparer> { diff --git a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/CrossPartition/Parallel/QueryPartitionRangePageAsyncEnumerator.cs b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/CrossPartition/Parallel/QueryPartitionRangePageAsyncEnumerator.cs index e2918a366e..5744ed734b 100644 --- a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/CrossPartition/Parallel/QueryPartitionRangePageAsyncEnumerator.cs +++ b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/CrossPartition/Parallel/QueryPartitionRangePageAsyncEnumerator.cs @@ -24,9 +24,8 @@ public QueryPartitionRangePageAsyncEnumerator( SqlQuerySpec sqlQuerySpec, FeedRangeState feedRangeState, Cosmos.PartitionKey? partitionKey, - QueryPaginationOptions queryPaginationOptions, - CancellationToken cancellationToken) - : base(feedRangeState, cancellationToken) + QueryPaginationOptions queryPaginationOptions) + : base(feedRangeState) { this.queryDataSource = queryDataSource ?? throw new ArgumentNullException(nameof(queryDataSource)); this.sqlQuerySpec = sqlQuerySpec ?? throw new ArgumentNullException(nameof(sqlQuerySpec)); diff --git a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/DCount/DCountQueryPipelineStage.Client.cs b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/DCount/DCountQueryPipelineStage.Client.cs index 162408e6ff..0e09e2c159 100644 --- a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/DCount/DCountQueryPipelineStage.Client.cs +++ b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/DCount/DCountQueryPipelineStage.Client.cs @@ -25,9 +25,8 @@ private sealed class ClientDCountQueryPipelineStage : DCountQueryPipelineStage private ClientDCountQueryPipelineStage( IQueryPipelineStage source, long count, - DCountInfo info, - CancellationToken cancellationToken) - : base(source, count, info, cancellationToken) + DCountInfo info) + : base(source, count, info) { // all the work is done in the base constructor. 
} @@ -35,7 +34,6 @@ private ClientDCountQueryPipelineStage( public static TryCatch MonadicCreate( DCountInfo info, CosmosElement continuationToken, - CancellationToken cancellationToken, MonadicCreatePipelineStage monadicCreatePipelineStage) { if (monadicCreatePipelineStage == null) @@ -43,7 +41,7 @@ public static TryCatch MonadicCreate( throw new ArgumentNullException(nameof(monadicCreatePipelineStage)); } - TryCatch tryCreateSource = monadicCreatePipelineStage(continuationToken, cancellationToken); + TryCatch tryCreateSource = monadicCreatePipelineStage(continuationToken); if (tryCreateSource.Failed) { return tryCreateSource; @@ -52,15 +50,14 @@ public static TryCatch MonadicCreate( ClientDCountQueryPipelineStage stage = new ClientDCountQueryPipelineStage( source: tryCreateSource.Result, count: 0, - info: info, - cancellationToken: cancellationToken); + info: info); return TryCatch.FromResult(stage); } - public override async ValueTask MoveNextAsync(ITrace trace) + public override async ValueTask MoveNextAsync(ITrace trace, CancellationToken cancellationToken) { - this.cancellationToken.ThrowIfCancellationRequested(); + cancellationToken.ThrowIfCancellationRequested(); if (trace == null) { @@ -73,9 +70,8 @@ public override async ValueTask MoveNextAsync(ITrace trace) } double requestCharge = 0; - long responseLengthBytes = 0; IReadOnlyDictionary additionalHeaders = null; - while (await this.inputStage.MoveNextAsync(trace)) + while (await this.inputStage.MoveNextAsync(trace, cancellationToken)) { TryCatch tryGetPageFromSource = this.inputStage.Current; if (tryGetPageFromSource.Failed) @@ -87,10 +83,9 @@ public override async ValueTask MoveNextAsync(ITrace trace) QueryPage sourcePage = tryGetPageFromSource.Result; requestCharge += sourcePage.RequestCharge; - responseLengthBytes += sourcePage.ResponseLengthInBytes; additionalHeaders = sourcePage.AdditionalHeaders; - this.cancellationToken.ThrowIfCancellationRequested(); + cancellationToken.ThrowIfCancellationRequested(); this.count += sourcePage.Documents.Count; } @@ -105,12 +100,12 @@ public override async ValueTask MoveNextAsync(ITrace trace) documents: finalResult, requestCharge: requestCharge, activityId: default, - responseLengthInBytes: responseLengthBytes, cosmosQueryExecutionInfo: default, distributionPlanSpec: default, disallowContinuationTokenMessage: default, additionalHeaders: additionalHeaders, - state: default); + state: default, + streaming: default); this.Current = TryCatch.FromResult(queryPage); this.returnedFinalPage = true; diff --git a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/DCount/DCountQueryPipelineStage.Compute.cs b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/DCount/DCountQueryPipelineStage.Compute.cs index 0970be1706..84f8cfff64 100644 --- a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/DCount/DCountQueryPipelineStage.Compute.cs +++ b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/DCount/DCountQueryPipelineStage.Compute.cs @@ -29,9 +29,8 @@ private sealed class ComputeDCountQueryPipelineStage : DCountQueryPipelineStage private ComputeDCountQueryPipelineStage( IQueryPipelineStage source, long count, - DCountInfo info, - CancellationToken cancellationToken) - : base(source, count, info, cancellationToken) + DCountInfo info) + : base(source, count, info) { // all the work is done in the base constructor. 
} @@ -39,11 +38,8 @@ private ComputeDCountQueryPipelineStage( public static TryCatch MonadicCreate( DCountInfo info, CosmosElement continuationToken, - CancellationToken cancellationToken, MonadicCreatePipelineStage monadicCreatePipelineStage) { - cancellationToken.ThrowIfCancellationRequested(); - DCountContinuationToken dcountContinuationToken; if (continuationToken != null) { @@ -68,7 +64,7 @@ public static TryCatch MonadicCreate( } else { - tryCreateSource = monadicCreatePipelineStage(dcountContinuationToken.SourceContinuationToken, cancellationToken); + tryCreateSource = monadicCreatePipelineStage(dcountContinuationToken.SourceContinuationToken); } if (tryCreateSource.Failed) @@ -79,15 +75,14 @@ public static TryCatch MonadicCreate( ComputeDCountQueryPipelineStage stage = new ComputeDCountQueryPipelineStage( tryCreateSource.Result, dcountContinuationToken.Count, - info, - cancellationToken); + info); return TryCatch.FromResult(stage); } - public override async ValueTask MoveNextAsync(ITrace trace) + public override async ValueTask MoveNextAsync(ITrace trace, CancellationToken cancellationToken) { - this.cancellationToken.ThrowIfCancellationRequested(); + cancellationToken.ThrowIfCancellationRequested(); if (trace == null) { @@ -102,7 +97,7 @@ public override async ValueTask MoveNextAsync(ITrace trace) // Draining aggregates is broken down into two stages QueryPage queryPage; - if (await this.inputStage.MoveNextAsync(trace)) + if (await this.inputStage.MoveNextAsync(trace, cancellationToken)) { // Stage 1: // Drain the aggregates fully from all continuations and all partitions @@ -115,7 +110,7 @@ public override async ValueTask MoveNextAsync(ITrace trace) } QueryPage sourcePage = tryGetSourcePage.Result; - this.cancellationToken.ThrowIfCancellationRequested(); + cancellationToken.ThrowIfCancellationRequested(); this.count += sourcePage.Documents.Count; DCountContinuationToken dcountContinuationToken = new DCountContinuationToken( @@ -126,12 +121,12 @@ public override async ValueTask MoveNextAsync(ITrace trace) documents: EmptyResults, requestCharge: sourcePage.RequestCharge, activityId: sourcePage.ActivityId, - responseLengthInBytes: sourcePage.ResponseLengthInBytes, cosmosQueryExecutionInfo: sourcePage.CosmosQueryExecutionInfo, distributionPlanSpec: default, disallowContinuationTokenMessage: sourcePage.DisallowContinuationTokenMessage, additionalHeaders: sourcePage.AdditionalHeaders, - state: queryState); + state: queryState, + streaming: sourcePage.Streaming); queryPage = emptyPage; } @@ -150,12 +145,12 @@ public override async ValueTask MoveNextAsync(ITrace trace) documents: finalResult, requestCharge: default, activityId: default, - responseLengthInBytes: default, cosmosQueryExecutionInfo: default, distributionPlanSpec: default, disallowContinuationTokenMessage: default, additionalHeaders: default, - state: default); + state: default, + streaming: default); queryPage = finalPage; this.returnedFinalPage = true; diff --git a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/DCount/DCountQueryPipelineStage.cs b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/DCount/DCountQueryPipelineStage.cs index 067379cd5e..3f4079ad45 100644 --- a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/DCount/DCountQueryPipelineStage.cs +++ b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/DCount/DCountQueryPipelineStage.cs @@ -35,14 +35,12 @@ internal abstract partial class DCountQueryPipelineStage : QueryPipelineStageBas /// The source component that will supply the local aggregates from multiple continuations 
and partitions. /// The actual dcount that will be reported. /// Metadata about the original dcount query that is elided in the rewritten query - /// The cancellation token for cooperative yeilding. /// This constructor is private since there is some async initialization that needs to happen in CreateAsync(). public DCountQueryPipelineStage( IQueryPipelineStage source, long count, - DCountInfo info, - CancellationToken cancellationToken) - : base(source, cancellationToken) + DCountInfo info) + : base(source) { this.count = count; this.info = info; @@ -52,18 +50,15 @@ public static TryCatch MonadicCreate( ExecutionEnvironment executionEnvironment, DCountInfo info, CosmosElement continuationToken, - CancellationToken cancellationToken, MonadicCreatePipelineStage monadicCreatePipelineStage) => executionEnvironment switch { ExecutionEnvironment.Client => ClientDCountQueryPipelineStage.MonadicCreate( info, continuationToken, - cancellationToken, monadicCreatePipelineStage), ExecutionEnvironment.Compute => ComputeDCountQueryPipelineStage.MonadicCreate( info, continuationToken, - cancellationToken, monadicCreatePipelineStage), _ => throw new ArgumentException($"Unknown {nameof(ExecutionEnvironment)}: {executionEnvironment}."), }; diff --git a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/Distinct/DistinctQueryPipelineStage.Client.cs b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/Distinct/DistinctQueryPipelineStage.Client.cs index c250193a60..b8536f6c05 100644 --- a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/Distinct/DistinctQueryPipelineStage.Client.cs +++ b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/Distinct/DistinctQueryPipelineStage.Client.cs @@ -31,9 +31,8 @@ private sealed class ClientDistinctQueryPipelineStage : DistinctQueryPipelineSta private ClientDistinctQueryPipelineStage( DistinctQueryType distinctQueryType, DistinctMap distinctMap, - IQueryPipelineStage source, - CancellationToken cancellationToken) - : base(distinctMap, source, cancellationToken) + IQueryPipelineStage source) + : base(distinctMap, source) { if ((distinctQueryType != DistinctQueryType.Unordered) && (distinctQueryType != DistinctQueryType.Ordered)) { @@ -45,7 +44,6 @@ private ClientDistinctQueryPipelineStage( public static TryCatch MonadicCreate( CosmosElement requestContinuation, - CancellationToken cancellationToken, MonadicCreatePipelineStage monadicCreatePipelineStage, DistinctQueryType distinctQueryType) { @@ -101,7 +99,7 @@ public static TryCatch MonadicCreate( sourceToken = null; } - TryCatch tryCreateSource = monadicCreatePipelineStage(sourceToken, cancellationToken); + TryCatch tryCreateSource = monadicCreatePipelineStage(sourceToken); if (!tryCreateSource.Succeeded) { return TryCatch.FromException(tryCreateSource.Exception); @@ -111,20 +109,19 @@ public static TryCatch MonadicCreate( new ClientDistinctQueryPipelineStage( distinctQueryType, tryCreateDistinctMap.Result, - tryCreateSource.Result, - cancellationToken)); + tryCreateSource.Result)); } - public override async ValueTask MoveNextAsync(ITrace trace) + public override async ValueTask MoveNextAsync(ITrace trace, CancellationToken cancellationToken) { - this.cancellationToken.ThrowIfCancellationRequested(); + cancellationToken.ThrowIfCancellationRequested(); if (trace == null) { throw new ArgumentNullException(nameof(trace)); } - if (!await this.inputStage.MoveNextAsync(trace)) + if (!await this.inputStage.MoveNextAsync(trace, cancellationToken)) { this.Current = default; return false; @@ -142,7 +139,7 @@ public override async ValueTask 
MoveNextAsync(ITrace trace) List distinctResults = new List(); foreach (CosmosElement document in sourcePage.Documents) { - this.cancellationToken.ThrowIfCancellationRequested(); + cancellationToken.ThrowIfCancellationRequested(); if (this.distinctMap.Add(document, out UInt128 _)) { @@ -171,12 +168,12 @@ public override async ValueTask MoveNextAsync(ITrace trace) documents: distinctResults, requestCharge: sourcePage.RequestCharge, activityId: sourcePage.ActivityId, - responseLengthInBytes: sourcePage.ResponseLengthInBytes, cosmosQueryExecutionInfo: sourcePage.CosmosQueryExecutionInfo, distributionPlanSpec: default, disallowContinuationTokenMessage: sourcePage.DisallowContinuationTokenMessage, additionalHeaders: sourcePage.AdditionalHeaders, - state: state); + state: state, + streaming: sourcePage.Streaming); } else { @@ -184,12 +181,12 @@ public override async ValueTask MoveNextAsync(ITrace trace) documents: distinctResults, requestCharge: sourcePage.RequestCharge, activityId: sourcePage.ActivityId, - responseLengthInBytes: sourcePage.ResponseLengthInBytes, cosmosQueryExecutionInfo: sourcePage.CosmosQueryExecutionInfo, distributionPlanSpec: default, disallowContinuationTokenMessage: ClientDistinctQueryPipelineStage.DisallowContinuationTokenMessage, additionalHeaders: sourcePage.AdditionalHeaders, - state: null); + state: null, + streaming: sourcePage.Streaming); } this.Current = TryCatch.FromResult(queryPage); diff --git a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/Distinct/DistinctQueryPipelineStage.Compute.cs b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/Distinct/DistinctQueryPipelineStage.Compute.cs index 06921710c4..43489e6f30 100644 --- a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/Distinct/DistinctQueryPipelineStage.Compute.cs +++ b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/Distinct/DistinctQueryPipelineStage.Compute.cs @@ -28,15 +28,13 @@ private sealed class ComputeDistinctQueryPipelineStage : DistinctQueryPipelineSt private ComputeDistinctQueryPipelineStage( DistinctMap distinctMap, - IQueryPipelineStage source, - CancellationToken cancellationToken) - : base(distinctMap, source, cancellationToken) + IQueryPipelineStage source) + : base(distinctMap, source) { } public static TryCatch MonadicCreate( CosmosElement requestContinuation, - CancellationToken cancellationToken, MonadicCreatePipelineStage monadicCreatePipelineStage, DistinctQueryType distinctQueryType) { @@ -68,7 +66,7 @@ public static TryCatch MonadicCreate( return TryCatch.FromException(tryCreateDistinctMap.Exception); } - TryCatch tryCreateSource = monadicCreatePipelineStage(distinctContinuationToken.SourceToken, cancellationToken); + TryCatch tryCreateSource = monadicCreatePipelineStage(distinctContinuationToken.SourceToken); if (!tryCreateSource.Succeeded) { return TryCatch.FromException(tryCreateSource.Exception); @@ -77,18 +75,17 @@ public static TryCatch MonadicCreate( return TryCatch.FromResult( new ComputeDistinctQueryPipelineStage( tryCreateDistinctMap.Result, - tryCreateSource.Result, - cancellationToken)); + tryCreateSource.Result)); } - public override async ValueTask MoveNextAsync(ITrace trace) + public override async ValueTask MoveNextAsync(ITrace trace, CancellationToken cancellationToken) { if (trace == null) { throw new ArgumentNullException(nameof(trace)); } - if (!await this.inputStage.MoveNextAsync(trace)) + if (!await this.inputStage.MoveNextAsync(trace, cancellationToken)) { this.Current = default; return false; @@ -129,12 +126,12 @@ public override async ValueTask 
MoveNextAsync(ITrace trace) documents: distinctResults, requestCharge: sourcePage.RequestCharge, activityId: sourcePage.ActivityId, - responseLengthInBytes: sourcePage.ResponseLengthInBytes, cosmosQueryExecutionInfo: sourcePage.CosmosQueryExecutionInfo, distributionPlanSpec: default, disallowContinuationTokenMessage: ComputeDistinctQueryPipelineStage.UseTryGetContinuationTokenMessage, additionalHeaders: sourcePage.AdditionalHeaders, - state: queryState); + state: queryState, + streaming: sourcePage.Streaming); this.Current = TryCatch.FromResult(queryPage); return true; diff --git a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/Distinct/DistinctQueryPipelineStage.cs b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/Distinct/DistinctQueryPipelineStage.cs index 088c64cc23..c929f84dcb 100644 --- a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/Distinct/DistinctQueryPipelineStage.cs +++ b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/Distinct/DistinctQueryPipelineStage.cs @@ -5,9 +5,7 @@ namespace Microsoft.Azure.Cosmos.Query.Core.Pipeline.Distinct { using System; - using System.Threading; using Microsoft.Azure.Cosmos.CosmosElements; - using Microsoft.Azure.Cosmos.Query.Core.ExecutionContext; using Microsoft.Azure.Cosmos.Query.Core.Monads; /// @@ -30,9 +28,8 @@ internal abstract partial class DistinctQueryPipelineStage : QueryPipelineStageB protected DistinctQueryPipelineStage( DistinctMap distinctMap, - IQueryPipelineStage source, - CancellationToken cancellationToken) - : base(source, cancellationToken) + IQueryPipelineStage source) + : base(source) { this.distinctMap = distinctMap ?? throw new ArgumentNullException(nameof(distinctMap)); } @@ -40,18 +37,15 @@ protected DistinctQueryPipelineStage( public static TryCatch MonadicCreate( ExecutionEnvironment executionEnvironment, CosmosElement requestContinuation, - CancellationToken cancellationToken, MonadicCreatePipelineStage monadicCreatePipelineStage, DistinctQueryType distinctQueryType) => executionEnvironment switch { ExecutionEnvironment.Client => ClientDistinctQueryPipelineStage.MonadicCreate( requestContinuation, - cancellationToken, monadicCreatePipelineStage, distinctQueryType), ExecutionEnvironment.Compute => ComputeDistinctQueryPipelineStage.MonadicCreate( requestContinuation, - cancellationToken, monadicCreatePipelineStage, distinctQueryType), _ => throw new ArgumentException($"Unknown {nameof(ExecutionEnvironment)}: {executionEnvironment}."), diff --git a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/EmptyQueryPipelineStage.cs b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/EmptyQueryPipelineStage.cs index 78c4fe9769..298de02386 100644 --- a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/EmptyQueryPipelineStage.cs +++ b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/EmptyQueryPipelineStage.cs @@ -29,14 +29,9 @@ public ValueTask DisposeAsync() return this.emptyAsyncEnumerator.DisposeAsync(); } - public ValueTask MoveNextAsync(ITrace trace) + public ValueTask MoveNextAsync(ITrace trace, CancellationToken cancellationToken) { return this.emptyAsyncEnumerator.MoveNextAsync(trace); } - - public void SetCancellationToken(CancellationToken cancellationToken) - { - // No work to do since this enumerator is fully sync. 
- } } } diff --git a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/FaultedQueryPipelineStage.cs b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/FaultedQueryPipelineStage.cs index 01eb048472..bb90db3603 100644 --- a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/FaultedQueryPipelineStage.cs +++ b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/FaultedQueryPipelineStage.cs @@ -33,14 +33,9 @@ public ValueTask DisposeAsync() return this.justAsyncEnumerator.DisposeAsync(); } - public ValueTask MoveNextAsync(ITrace trace) + public ValueTask MoveNextAsync(ITrace trace, CancellationToken cancellationToken) { return this.justAsyncEnumerator.MoveNextAsync(trace); } - - public void SetCancellationToken(CancellationToken cancellationToken) - { - // No work to do with since this enumerator is fully sync. - } } } diff --git a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/GroupBy/GroupByQueryPipelineStage.Client.cs b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/GroupBy/GroupByQueryPipelineStage.Client.cs index cea8f6e566..6ed1804abf 100644 --- a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/GroupBy/GroupByQueryPipelineStage.Client.cs +++ b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/GroupBy/GroupByQueryPipelineStage.Client.cs @@ -6,7 +6,6 @@ namespace Microsoft.Azure.Cosmos.Query.Core.Pipeline.GroupBy { using System; using System.Collections.Generic; - using System.Collections.Immutable; using System.Threading; using System.Threading.Tasks; using Microsoft.Azure.Cosmos.CosmosElements; @@ -22,16 +21,14 @@ private sealed class ClientGroupByQueryPipelineStage : GroupByQueryPipelineStage public const string ContinuationTokenNotSupportedWithGroupBy = "Continuation token is not supported for queries with GROUP BY. Do not use FeedResponse.ResponseContinuation or remove the GROUP BY from the query."; private ClientGroupByQueryPipelineStage( IQueryPipelineStage source, - CancellationToken cancellationToken, GroupingTable groupingTable, int pageSize) - : base(source, cancellationToken, groupingTable, pageSize) + : base(source, groupingTable, pageSize) { } public static TryCatch MonadicCreate( CosmosElement requestContinuation, - CancellationToken cancellationToken, MonadicCreatePipelineStage monadicCreatePipelineStage, IReadOnlyList aggregates, IReadOnlyDictionary groupByAliasToAggregateType, @@ -51,7 +48,7 @@ public static TryCatch MonadicCreate( return TryCatch.FromException(tryCreateGroupingTable.Exception); } - TryCatch tryCreateSource = monadicCreatePipelineStage(requestContinuation, cancellationToken); + TryCatch tryCreateSource = monadicCreatePipelineStage(requestContinuation); if (tryCreateSource.Failed) { return tryCreateSource; @@ -59,16 +56,15 @@ public static TryCatch MonadicCreate( IQueryPipelineStage stage = new ClientGroupByQueryPipelineStage( tryCreateSource.Result, - cancellationToken, tryCreateGroupingTable.Result, pageSize); return TryCatch.FromResult(stage); } - public override async ValueTask MoveNextAsync(ITrace trace) + public override async ValueTask MoveNextAsync(ITrace trace, CancellationToken cancellationToken) { - this.cancellationToken.ThrowIfCancellationRequested(); + cancellationToken.ThrowIfCancellationRequested(); if (trace == null) { @@ -84,12 +80,11 @@ public override async ValueTask MoveNextAsync(ITrace trace) // Draining GROUP BY is broken down into two stages: double requestCharge = 0.0; - long responseLengthInBytes = 0; IReadOnlyDictionary addtionalHeaders = null; - while (await this.inputStage.MoveNextAsync(trace)) + while (await this.inputStage.MoveNextAsync(trace, 
cancellationToken)) { - this.cancellationToken.ThrowIfCancellationRequested(); + cancellationToken.ThrowIfCancellationRequested(); // Stage 1: // Drain the groupings fully from all continuation and all partitions @@ -103,7 +98,6 @@ public override async ValueTask MoveNextAsync(ITrace trace) QueryPage sourcePage = tryGetSourcePage.Result; requestCharge += sourcePage.RequestCharge; - responseLengthInBytes += sourcePage.ResponseLengthInBytes; addtionalHeaders = sourcePage.AdditionalHeaders; this.AggregateGroupings(sourcePage.Documents); } @@ -120,12 +114,12 @@ public override async ValueTask MoveNextAsync(ITrace trace) documents: results, requestCharge: requestCharge, activityId: default, - responseLengthInBytes: responseLengthInBytes, cosmosQueryExecutionInfo: default, distributionPlanSpec: default, disallowContinuationTokenMessage: ClientGroupByQueryPipelineStage.ContinuationTokenNotSupportedWithGroupBy, additionalHeaders: addtionalHeaders, - state: default); + state: default, + streaming: null); this.Current = TryCatch.FromResult(queryPage); return true; diff --git a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/GroupBy/GroupByQueryPipelineStage.Compute.cs b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/GroupBy/GroupByQueryPipelineStage.Compute.cs index 079c69465f..2692e9ba6b 100644 --- a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/GroupBy/GroupByQueryPipelineStage.Compute.cs +++ b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/GroupBy/GroupByQueryPipelineStage.Compute.cs @@ -9,7 +9,6 @@ namespace Microsoft.Azure.Cosmos.Query.Core.Pipeline.GroupBy using System.Threading; using System.Threading.Tasks; using Microsoft.Azure.Cosmos.CosmosElements; - using Microsoft.Azure.Cosmos.CosmosElements.Numbers; using Microsoft.Azure.Cosmos.Query.Core.Exceptions; using Microsoft.Azure.Cosmos.Query.Core.Metrics; using Microsoft.Azure.Cosmos.Query.Core.Monads; @@ -29,16 +28,14 @@ private sealed class ComputeGroupByQueryPipelineStage : GroupByQueryPipelineStag private ComputeGroupByQueryPipelineStage( IQueryPipelineStage source, - CancellationToken cancellationToken, GroupingTable groupingTable, int pageSize) - : base(source, cancellationToken, groupingTable, pageSize) + : base(source, groupingTable, pageSize) { } public static TryCatch MonadicCreate( CosmosElement requestContinuation, - CancellationToken cancellationToken, MonadicCreatePipelineStage monadicCreatePipelineStage, IReadOnlyList aggregates, IReadOnlyDictionary groupByAliasToAggregateType, @@ -71,7 +68,7 @@ public static TryCatch MonadicCreate( } else { - tryCreateSource = monadicCreatePipelineStage(groupByContinuationToken.SourceContinuationToken, cancellationToken); + tryCreateSource = monadicCreatePipelineStage(groupByContinuationToken.SourceContinuationToken); } if (!tryCreateSource.Succeeded) @@ -94,14 +91,13 @@ public static TryCatch MonadicCreate( return TryCatch.FromResult( new ComputeGroupByQueryPipelineStage( tryCreateSource.Result, - cancellationToken, tryCreateGroupingTable.Result, pageSize)); } - public override async ValueTask MoveNextAsync(ITrace trace) + public override async ValueTask MoveNextAsync(ITrace trace, CancellationToken cancellationToken) { - this.cancellationToken.ThrowIfCancellationRequested(); + cancellationToken.ThrowIfCancellationRequested(); if (trace == null) { @@ -116,7 +112,7 @@ public override async ValueTask MoveNextAsync(ITrace trace) // Draining GROUP BY is broken down into two stages: QueryPage queryPage; - if (await this.inputStage.MoveNextAsync(trace)) + if (await 
this.inputStage.MoveNextAsync(trace, cancellationToken)) { // Stage 1: // Drain the groupings fully from all continuation and all partitions @@ -142,12 +138,12 @@ public override async ValueTask MoveNextAsync(ITrace trace) documents: EmptyResults, requestCharge: sourcePage.RequestCharge, activityId: sourcePage.ActivityId, - responseLengthInBytes: sourcePage.ResponseLengthInBytes, cosmosQueryExecutionInfo: sourcePage.CosmosQueryExecutionInfo, distributionPlanSpec: default, disallowContinuationTokenMessage: null, additionalHeaders: sourcePage.AdditionalHeaders, - state: state); + state: state, + streaming: sourcePage.Streaming); } else { @@ -173,12 +169,12 @@ public override async ValueTask MoveNextAsync(ITrace trace) documents: results, requestCharge: default, activityId: default, - responseLengthInBytes: default, cosmosQueryExecutionInfo: default, distributionPlanSpec: default, disallowContinuationTokenMessage: default, additionalHeaders: default, - state: state); + state: state, + streaming: null); } this.Current = TryCatch.FromResult(queryPage); diff --git a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/GroupBy/GroupByQueryPipelineStage.cs b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/GroupBy/GroupByQueryPipelineStage.cs index f42be927cc..58e400b72e 100644 --- a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/GroupBy/GroupByQueryPipelineStage.cs +++ b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/GroupBy/GroupByQueryPipelineStage.cs @@ -53,10 +53,9 @@ internal abstract partial class GroupByQueryPipelineStage : QueryPipelineStageBa protected GroupByQueryPipelineStage( IQueryPipelineStage source, - CancellationToken cancellationToken, GroupingTable groupingTable, int pageSize) - : base(source, cancellationToken) + : base(source) { this.groupingTable = groupingTable ?? 
throw new ArgumentNullException(nameof(groupingTable)); this.pageSize = pageSize; @@ -65,7 +64,6 @@ protected GroupByQueryPipelineStage( public static TryCatch MonadicCreate( ExecutionEnvironment executionEnvironment, CosmosElement continuationToken, - CancellationToken cancellationToken, MonadicCreatePipelineStage monadicCreatePipelineStage, IReadOnlyList aggregates, IReadOnlyDictionary groupByAliasToAggregateType, @@ -75,7 +73,6 @@ public static TryCatch MonadicCreate( { ExecutionEnvironment.Client => ClientGroupByQueryPipelineStage.MonadicCreate( continuationToken, - cancellationToken, monadicCreatePipelineStage, aggregates, groupByAliasToAggregateType, @@ -84,7 +81,6 @@ public static TryCatch MonadicCreate( pageSize), ExecutionEnvironment.Compute => ComputeGroupByQueryPipelineStage.MonadicCreate( continuationToken, - cancellationToken, monadicCreatePipelineStage, aggregates, groupByAliasToAggregateType, diff --git a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/IQueryPipelineStage.cs b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/IQueryPipelineStage.cs index 11796385cb..151ba5db9a 100644 --- a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/IQueryPipelineStage.cs +++ b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/IQueryPipelineStage.cs @@ -4,17 +4,12 @@ namespace Microsoft.Azure.Cosmos.Query.Core.Pipeline { - using System; - using System.Collections.Generic; using System.Threading; - using System.Threading.Tasks; using Microsoft.Azure.Cosmos.Pagination; using Microsoft.Azure.Cosmos.Query.Core.Monads; using Microsoft.Azure.Cosmos.Query.Core.Pipeline.Pagination; - using Microsoft.Azure.Cosmos.Tracing; internal interface IQueryPipelineStage : ITracingAsyncEnumerator> { - void SetCancellationToken(CancellationToken cancellationToken); } } diff --git a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/IndexUtilizationHelper.cs b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/IndexUtilizationHelper.cs new file mode 100644 index 0000000000..cb7a7add84 --- /dev/null +++ b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/IndexUtilizationHelper.cs @@ -0,0 +1,48 @@ +// ------------------------------------------------------------ +// Copyright (c) Microsoft Corporation. All rights reserved. +// ------------------------------------------------------------ + +namespace Microsoft.Azure.Cosmos.Query.Core.Pipeline +{ + using System.Collections.Generic; + + internal static class IndexUtilizationHelper + { + public static IReadOnlyDictionary AccumulateIndexUtilization( + IReadOnlyDictionary cumulativeHeaders, + IReadOnlyDictionary currentHeaders) + { + if (cumulativeHeaders == null) + { + return currentHeaders; + } + + if (currentHeaders == null) + { + return cumulativeHeaders; + } + + // Index utilization is supposed to be static across partitions and round trips. + if (currentHeaders.ContainsKey(Documents.HttpConstants.HttpHeaders.IndexUtilization) || + !cumulativeHeaders.ContainsKey(Documents.HttpConstants.HttpHeaders.IndexUtilization)) + { + return currentHeaders; + } + + // We could try to cast currentHeaders to a dictionary, but this can cause unpleasant side effects with singletons + Dictionary additionalHeaders = new Dictionary(); + + // Until we get the new .NET version, we need to copy the headers manually. 
+ foreach (KeyValuePair header in currentHeaders) + { + additionalHeaders.Add(header.Key, header.Value); + } + + additionalHeaders.Add( + Documents.HttpConstants.HttpHeaders.IndexUtilization, + cumulativeHeaders[Documents.HttpConstants.HttpHeaders.IndexUtilization]); + + return additionalHeaders; + } + } +} \ No newline at end of file diff --git a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/LazyQueryPipelineStage.cs b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/LazyQueryPipelineStage.cs index 3f7a7d316e..7e83643cc2 100644 --- a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/LazyQueryPipelineStage.cs +++ b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/LazyQueryPipelineStage.cs @@ -14,12 +14,10 @@ namespace Microsoft.Azure.Cosmos.Query.Core.Pipeline internal sealed class LazyQueryPipelineStage : IQueryPipelineStage { private readonly AsyncLazy> lazyTryCreateStage; - private CancellationToken cancellationToken; - public LazyQueryPipelineStage(AsyncLazy> lazyTryCreateStage, CancellationToken cancellationToken) + public LazyQueryPipelineStage(AsyncLazy> lazyTryCreateStage) { this.lazyTryCreateStage = lazyTryCreateStage ?? throw new ArgumentNullException(nameof(lazyTryCreateStage)); - this.cancellationToken = cancellationToken; } public TryCatch Current { get; private set; } @@ -38,14 +36,14 @@ public ValueTask DisposeAsync() return default; } - public async ValueTask MoveNextAsync(ITrace trace) + public async ValueTask MoveNextAsync(ITrace trace, CancellationToken cancellationToken) { if (trace == null) { throw new ArgumentNullException(nameof(trace)); } - TryCatch tryCreateStage = await this.lazyTryCreateStage.GetValueAsync(trace, this.cancellationToken); + TryCatch tryCreateStage = await this.lazyTryCreateStage.GetValueAsync(trace, cancellationToken); if (tryCreateStage.Failed) { this.Current = TryCatch.FromException(tryCreateStage.Exception); @@ -53,8 +51,7 @@ public async ValueTask MoveNextAsync(ITrace trace) } IQueryPipelineStage stage = tryCreateStage.Result; - stage.SetCancellationToken(this.cancellationToken); - if (!await stage.MoveNextAsync(trace)) + if (!await stage.MoveNextAsync(trace, cancellationToken)) { this.Current = default; return false; @@ -63,10 +60,5 @@ public async ValueTask MoveNextAsync(ITrace trace) this.Current = stage.Current; return true; } - - public void SetCancellationToken(CancellationToken cancellationToken) - { - this.cancellationToken = cancellationToken; - } } } diff --git a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/MonadicCreatePipelineStage.cs b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/MonadicCreatePipelineStage.cs index 9067eb471b..cffac556e0 100644 --- a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/MonadicCreatePipelineStage.cs +++ b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/MonadicCreatePipelineStage.cs @@ -8,5 +8,5 @@ namespace Microsoft.Azure.Cosmos.Query.Core.Pipeline using Microsoft.Azure.Cosmos.CosmosElements; using Microsoft.Azure.Cosmos.Query.Core.Monads; - internal delegate TryCatch MonadicCreatePipelineStage(CosmosElement continuationToken, CancellationToken cancellationToken); + internal delegate TryCatch MonadicCreatePipelineStage(CosmosElement continuationToken); } diff --git a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/NameCacheStaleRetryQueryPipelineStage.cs b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/NameCacheStaleRetryQueryPipelineStage.cs index 177589193c..50fbb31b35 100644 --- a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/NameCacheStaleRetryQueryPipelineStage.cs +++ 
b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/NameCacheStaleRetryQueryPipelineStage.cs @@ -36,9 +36,9 @@ public ValueTask DisposeAsync() return this.currentQueryPipelineStage.DisposeAsync(); } - public async ValueTask MoveNextAsync(ITrace trace) + public async ValueTask MoveNextAsync(ITrace trace, CancellationToken cancellationToken) { - if (!await this.currentQueryPipelineStage.MoveNextAsync(trace)) + if (!await this.currentQueryPipelineStage.MoveNextAsync(trace, cancellationToken)) { return false; } @@ -61,16 +61,11 @@ await this.cosmosQueryContext.QueryClient.ForceRefreshCollectionCacheAsync( this.alreadyRetried = true; await this.currentQueryPipelineStage.DisposeAsync(); this.currentQueryPipelineStage = this.queryPipelineStageFactory(); - return await this.MoveNextAsync(trace); + return await this.MoveNextAsync(trace, cancellationToken); } } return true; } - - public void SetCancellationToken(CancellationToken cancellationToken) - { - this.currentQueryPipelineStage.SetCancellationToken(cancellationToken); - } } } diff --git a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/OptimisticDirectExecution/OptimisticDirectExecutionQueryPipelineStage.cs b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/OptimisticDirectExecution/OptimisticDirectExecutionQueryPipelineStage.cs index 7e571d0a41..8bad485a32 100644 --- a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/OptimisticDirectExecution/OptimisticDirectExecutionQueryPipelineStage.cs +++ b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/OptimisticDirectExecution/OptimisticDirectExecutionQueryPipelineStage.cs @@ -58,9 +58,9 @@ public ValueTask DisposeAsync() return this.inner.Failed ? default : this.inner.Result.DisposeAsync(); } - public async ValueTask MoveNextAsync(ITrace trace) + public async ValueTask MoveNextAsync(ITrace trace, CancellationToken cancellationToken) { - TryCatch hasNext = await this.inner.TryAsync(pipelineStage => pipelineStage.MoveNextAsync(trace)); + TryCatch hasNext = await this.inner.TryAsync(pipelineStage => pipelineStage.MoveNextAsync(trace, cancellationToken)); bool success = hasNext.Succeeded && hasNext.Result; if (this.executionState == ExecutionState.OptimisticDirectExecution) { @@ -91,7 +91,7 @@ public async ValueTask MoveNextAsync(ITrace trace) { // This is where we will unwrap tne continuation token and extract the client distribution plan // Pipelines to handle client distribution would be generated here - success = await this.SwitchToFallbackPipelineAsync(continuationToken: null, trace); + success = await this.SwitchToFallbackPipelineAsync(continuationToken: null, trace, cancellationToken); } this.previousRequiresDistribution = requiresDistribution; @@ -99,18 +99,13 @@ public async ValueTask MoveNextAsync(ITrace trace) } else if (isPartitionSplitException) { - success = await this.SwitchToFallbackPipelineAsync(continuationToken: UnwrapContinuationToken(this.continuationToken), trace); + success = await this.SwitchToFallbackPipelineAsync(continuationToken: UnwrapContinuationToken(this.continuationToken), trace, cancellationToken); } } return success; } - public void SetCancellationToken(CancellationToken cancellationToken) - { - this.inner.Try(pipelineStage => pipelineStage.SetCancellationToken(cancellationToken)); - } - private static CosmosElement UnwrapContinuationToken(CosmosElement continuationToken) { if (continuationToken == null) return null; @@ -122,7 +117,7 @@ private static CosmosElement UnwrapContinuationToken(CosmosElement continuationT return CosmosArray.Create(backendContinuationToken); } - 
private async Task SwitchToFallbackPipelineAsync(CosmosElement continuationToken, ITrace trace) + private async Task SwitchToFallbackPipelineAsync(CosmosElement continuationToken, ITrace trace, CancellationToken cancellationToken) { Debug.Assert(this.executionState == ExecutionState.OptimisticDirectExecution, "OptimisticDirectExecuteQueryPipelineStage Assert!", "Only OptimisticDirectExecute pipeline can create this fallback pipeline"); this.executionState = ExecutionState.SpecializedDocumentQueryExecution; @@ -135,7 +130,7 @@ private async Task SwitchToFallbackPipelineAsync(CosmosElement continuatio return false; } - return await this.inner.Result.MoveNextAsync(trace); + return await this.inner.Result.MoveNextAsync(trace, cancellationToken); } public static TryCatch MonadicCreate( @@ -182,19 +177,14 @@ public ValueTask DisposeAsync() return this.queryPartitionRangePageAsyncEnumerator.DisposeAsync(); } - public void SetCancellationToken(CancellationToken cancellationToken) - { - this.queryPartitionRangePageAsyncEnumerator.SetCancellationToken(cancellationToken); - } - - public async ValueTask MoveNextAsync(ITrace trace) + public async ValueTask MoveNextAsync(ITrace trace, CancellationToken cancellationToken) { if (trace == null) { throw new ArgumentNullException(nameof(trace)); } - if (!await this.queryPartitionRangePageAsyncEnumerator.MoveNextAsync(trace)) + if (!await this.queryPartitionRangePageAsyncEnumerator.MoveNextAsync(trace, cancellationToken)) { this.Current = default; return false; @@ -230,12 +220,12 @@ public async ValueTask MoveNextAsync(ITrace trace) backendQueryPage.Documents, backendQueryPage.RequestCharge, backendQueryPage.ActivityId, - backendQueryPage.ResponseLengthInBytes, backendQueryPage.CosmosQueryExecutionInfo, backendQueryPage.DistributionPlanSpec, disallowContinuationTokenMessage: null, backendQueryPage.AdditionalHeaders, - queryState); + queryState, + backendQueryPage.Streaming); this.Current = TryCatch.FromResult(queryPage); return true; @@ -282,8 +272,7 @@ public static TryCatch MonadicCreate( updatedSqlQuerySpec, feedRangeState, partitionKey, - queryPaginationOptions, - cancellationToken); + queryPaginationOptions); OptimisticDirectExecutionQueryPipelineImpl stage = new OptimisticDirectExecutionQueryPipelineImpl(partitionPageEnumerator); return TryCatch.FromResult(stage); diff --git a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/Pagination/QueryPage.cs b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/Pagination/QueryPage.cs index eab1f61c79..942d642110 100644 --- a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/Pagination/QueryPage.cs +++ b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/Pagination/QueryPage.cs @@ -1,55 +1,56 @@ -// ------------------------------------------------------------ -// Copyright (c) Microsoft Corporation. All rights reserved. 
-// ------------------------------------------------------------ - -namespace Microsoft.Azure.Cosmos.Query.Core.Pipeline.Pagination -{ - using System; - using System.Collections.Generic; - using System.Collections.Immutable; - using System.Linq; - using Microsoft.Azure.Cosmos.CosmosElements; - using Microsoft.Azure.Cosmos.Pagination; - using Microsoft.Azure.Cosmos.Query.Core.QueryClient; - using Newtonsoft.Json; - - internal sealed class QueryPage : Page - { - public static readonly ImmutableHashSet BannedHeaders = new HashSet() - { - Microsoft.Azure.Documents.HttpConstants.HttpHeaders.Continuation, - Microsoft.Azure.Documents.HttpConstants.HttpHeaders.ContinuationToken, - }.Concat(BannedHeadersBase).ToImmutableHashSet(); - - public QueryPage( - IReadOnlyList documents, - double requestCharge, - string activityId, - long responseLengthInBytes, - Lazy cosmosQueryExecutionInfo, - DistributionPlanSpec distributionPlanSpec, - string disallowContinuationTokenMessage, - IReadOnlyDictionary additionalHeaders, - QueryState state) - : base(requestCharge, activityId, additionalHeaders, state) - { - this.Documents = documents ?? throw new ArgumentNullException(nameof(documents)); - this.ResponseLengthInBytes = responseLengthInBytes < 0 ? throw new ArgumentOutOfRangeException(nameof(responseLengthInBytes)) : responseLengthInBytes; - this.CosmosQueryExecutionInfo = cosmosQueryExecutionInfo; - this.DistributionPlanSpec = distributionPlanSpec; - this.DisallowContinuationTokenMessage = disallowContinuationTokenMessage; - } - - public IReadOnlyList Documents { get; } - - public long ResponseLengthInBytes { get; } - - public Lazy CosmosQueryExecutionInfo { get; } - - public DistributionPlanSpec DistributionPlanSpec { get; } - - public string DisallowContinuationTokenMessage { get; } - - protected override ImmutableHashSet DerivedClassBannedHeaders => QueryPage.BannedHeaders; - } +// ------------------------------------------------------------ +// Copyright (c) Microsoft Corporation. All rights reserved. +// ------------------------------------------------------------ + +namespace Microsoft.Azure.Cosmos.Query.Core.Pipeline.Pagination +{ + using System; + using System.Collections.Generic; + using System.Collections.Immutable; + using System.Linq; + using Microsoft.Azure.Cosmos.CosmosElements; + using Microsoft.Azure.Cosmos.Pagination; + using Microsoft.Azure.Cosmos.Query.Core.QueryClient; + + internal sealed class QueryPage : Page + { + public static readonly ImmutableHashSet BannedHeaders = new HashSet() + { + Microsoft.Azure.Documents.HttpConstants.HttpHeaders.Continuation, + Microsoft.Azure.Documents.HttpConstants.HttpHeaders.ContinuationToken, + }.Concat(BannedHeadersBase).ToImmutableHashSet(); + + public QueryPage( + IReadOnlyList documents, + double requestCharge, + string activityId, + Lazy cosmosQueryExecutionInfo, + DistributionPlanSpec distributionPlanSpec, + string disallowContinuationTokenMessage, + IReadOnlyDictionary additionalHeaders, + QueryState state, + bool? streaming) + : base(requestCharge, activityId, additionalHeaders, state) + { + this.Documents = documents ?? 
throw new ArgumentNullException(nameof(documents)); + this.CosmosQueryExecutionInfo = cosmosQueryExecutionInfo; + this.DistributionPlanSpec = distributionPlanSpec; + this.DisallowContinuationTokenMessage = disallowContinuationTokenMessage; + this.Streaming = streaming; + } + + public IReadOnlyList Documents { get; } + + public Lazy CosmosQueryExecutionInfo { get; } + + public DistributionPlanSpec DistributionPlanSpec { get; } + + public string DisallowContinuationTokenMessage { get; } + + public bool? Streaming { get; } + + public override int ItemCount => this.Documents.Count; + + protected override ImmutableHashSet DerivedClassBannedHeaders => QueryPage.BannedHeaders; + } } \ No newline at end of file diff --git a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/Pagination/QueryPaginationOptions.cs b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/Pagination/QueryPaginationOptions.cs index c985158828..e0f179a127 100644 --- a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/Pagination/QueryPaginationOptions.cs +++ b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/Pagination/QueryPaginationOptions.cs @@ -32,7 +32,7 @@ internal sealed class QueryPaginationOptions : PaginationOptions public QueryPaginationOptions( int? pageSizeHint = null, bool optimisticDirectExecute = false, - Dictionary additionalHeaders = null) + IReadOnlyDictionary additionalHeaders = null) : base(pageSizeHint, additionalHeaders) { this.OptimisticDirectExecute = optimisticDirectExecute; diff --git a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/PipelineFactory.cs b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/PipelineFactory.cs index a119d5866a..1ae5312c2e 100644 --- a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/PipelineFactory.cs +++ b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/PipelineFactory.cs @@ -33,8 +33,7 @@ public static TryCatch MonadicCreate( QueryInfo queryInfo, QueryPaginationOptions queryPaginationOptions, int maxConcurrency, - CosmosElement requestContinuationToken, - CancellationToken requestCancellationToken) + CosmosElement requestContinuationToken) { if (documentContainer == null) { @@ -68,7 +67,7 @@ public static TryCatch MonadicCreate( MonadicCreatePipelineStage monadicCreatePipelineStage; if (queryInfo.HasOrderBy) { - monadicCreatePipelineStage = (continuationToken, cancellationToken) => OrderByCrossPartitionQueryPipelineStage.MonadicCreate( + monadicCreatePipelineStage = (continuationToken) => OrderByCrossPartitionQueryPipelineStage.MonadicCreate( documentContainer: documentContainer, sqlQuerySpec: sqlQuerySpec, targetRanges: targetRanges, @@ -78,12 +77,11 @@ public static TryCatch MonadicCreate( .Zip(queryInfo.OrderBy, (expression, sortOrder) => new OrderByColumn(expression, sortOrder)).ToList(), queryPaginationOptions: queryPaginationOptions, maxConcurrency: maxConcurrency, - continuationToken: continuationToken, - cancellationToken: cancellationToken); + continuationToken: continuationToken); } else { - monadicCreatePipelineStage = (continuationToken, cancellationToken) => ParallelCrossPartitionQueryPipelineStage.MonadicCreate( + monadicCreatePipelineStage = (continuationToken) => ParallelCrossPartitionQueryPipelineStage.MonadicCreate( documentContainer: documentContainer, sqlQuerySpec: sqlQuerySpec, targetRanges: targetRanges, @@ -91,31 +89,28 @@ public static TryCatch MonadicCreate( partitionKey: partitionKey, prefetchPolicy: prefetchPolicy, maxConcurrency: maxConcurrency, - continuationToken: continuationToken, - cancellationToken: cancellationToken); + continuationToken: 
continuationToken); } if (queryInfo.HasAggregates && !queryInfo.HasGroupBy) { MonadicCreatePipelineStage monadicCreateSourceStage = monadicCreatePipelineStage; - monadicCreatePipelineStage = (continuationToken, cancellationToken) => AggregateQueryPipelineStage.MonadicCreate( + monadicCreatePipelineStage = (continuationToken) => AggregateQueryPipelineStage.MonadicCreate( executionEnvironment, queryInfo.Aggregates, queryInfo.GroupByAliasToAggregateType, queryInfo.GroupByAliases, queryInfo.HasSelectValue, continuationToken, - cancellationToken, monadicCreateSourceStage); } if (queryInfo.HasDistinct) { MonadicCreatePipelineStage monadicCreateSourceStage = monadicCreatePipelineStage; - monadicCreatePipelineStage = (continuationToken, cancellationToken) => DistinctQueryPipelineStage.MonadicCreate( + monadicCreatePipelineStage = (continuationToken) => DistinctQueryPipelineStage.MonadicCreate( executionEnvironment, continuationToken, - cancellationToken, monadicCreateSourceStage, queryInfo.DistinctType); } @@ -123,10 +118,9 @@ public static TryCatch MonadicCreate( if (queryInfo.HasGroupBy) { MonadicCreatePipelineStage monadicCreateSourceStage = monadicCreatePipelineStage; - monadicCreatePipelineStage = (continuationToken, cancellationToken) => GroupByQueryPipelineStage.MonadicCreate( + monadicCreatePipelineStage = (continuationToken) => GroupByQueryPipelineStage.MonadicCreate( executionEnvironment, continuationToken, - cancellationToken, monadicCreateSourceStage, queryInfo.Aggregates, queryInfo.GroupByAliasToAggregateType, @@ -138,49 +132,45 @@ public static TryCatch MonadicCreate( if (queryInfo.HasOffset) { MonadicCreatePipelineStage monadicCreateSourceStage = monadicCreatePipelineStage; - monadicCreatePipelineStage = (continuationToken, cancellationToken) => SkipQueryPipelineStage.MonadicCreate( + monadicCreatePipelineStage = (continuationToken) => SkipQueryPipelineStage.MonadicCreate( executionEnvironment, queryInfo.Offset.Value, continuationToken, - cancellationToken, monadicCreateSourceStage); } if (queryInfo.HasLimit) { MonadicCreatePipelineStage monadicCreateSourceStage = monadicCreatePipelineStage; - monadicCreatePipelineStage = (continuationToken, cancellationToken) => TakeQueryPipelineStage.MonadicCreateLimitStage( + monadicCreatePipelineStage = (continuationToken) => TakeQueryPipelineStage.MonadicCreateLimitStage( executionEnvironment, queryInfo.Limit.Value, continuationToken, - cancellationToken, monadicCreateSourceStage); } if (queryInfo.HasTop) { MonadicCreatePipelineStage monadicCreateSourceStage = monadicCreatePipelineStage; - monadicCreatePipelineStage = (continuationToken, cancellationToken) => TakeQueryPipelineStage.MonadicCreateTopStage( + monadicCreatePipelineStage = (continuationToken) => TakeQueryPipelineStage.MonadicCreateTopStage( executionEnvironment, queryInfo.Top.Value, continuationToken, - cancellationToken, monadicCreateSourceStage); } if (queryInfo.HasDCount) { MonadicCreatePipelineStage monadicCreateSourceStage = monadicCreatePipelineStage; - monadicCreatePipelineStage = (continuationToken, cancellationToken) => DCountQueryPipelineStage.MonadicCreate( + monadicCreatePipelineStage = (continuationToken) => DCountQueryPipelineStage.MonadicCreate( executionEnvironment, queryInfo.DCountInfo, continuationToken, - cancellationToken, monadicCreateSourceStage); } - return monadicCreatePipelineStage(requestContinuationToken, requestCancellationToken) - .Try(onSuccess: (stage) => new SkipEmptyPageQueryPipelineStage(stage, requestCancellationToken)); + return 
monadicCreatePipelineStage(requestContinuationToken) + .Try(onSuccess: stage => new SkipEmptyPageQueryPipelineStage(stage)); } private static PrefetchPolicy DeterminePrefetchPolicy(QueryInfo queryInfo) diff --git a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/QueryPipelineStageBase.cs b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/QueryPipelineStageBase.cs index 5cfc69f0d9..561bbd89ae 100644 --- a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/QueryPipelineStageBase.cs +++ b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/QueryPipelineStageBase.cs @@ -14,12 +14,10 @@ namespace Microsoft.Azure.Cosmos.Query.Core.Pipeline internal abstract class QueryPipelineStageBase : IQueryPipelineStage { protected readonly IQueryPipelineStage inputStage; - protected CancellationToken cancellationToken; - protected QueryPipelineStageBase(IQueryPipelineStage inputStage, CancellationToken cancellationToken) + protected QueryPipelineStageBase(IQueryPipelineStage inputStage) { this.inputStage = inputStage ?? throw new ArgumentNullException(nameof(inputStage)); - this.cancellationToken = cancellationToken; } public TryCatch Current { get; protected set; } @@ -29,14 +27,6 @@ public ValueTask DisposeAsync() return this.inputStage.DisposeAsync(); } - public abstract ValueTask MoveNextAsync(ITrace trace); - - public void SetCancellationToken(CancellationToken cancellationToken) - { - // Only here to support legacy query iterator and ExecuteNextAsync - // can be removed only we only expose IAsyncEnumerable in v4 sdk. - this.cancellationToken = cancellationToken; - this.inputStage.SetCancellationToken(cancellationToken); - } + public abstract ValueTask MoveNextAsync(ITrace trace, CancellationToken cancellationToken); } } diff --git a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/Skip/SkipQueryPipelineStage.Client.cs b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/Skip/SkipQueryPipelineStage.Client.cs index 7f80df9930..e04b75207d 100644 --- a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/Skip/SkipQueryPipelineStage.Client.cs +++ b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/Skip/SkipQueryPipelineStage.Client.cs @@ -22,9 +22,8 @@ private sealed class ClientSkipQueryPipelineStage : SkipQueryPipelineStage { private ClientSkipQueryPipelineStage( IQueryPipelineStage source, - CancellationToken cancellationToken, long skipCount) - : base(source, cancellationToken, skipCount) + : base(source, skipCount) { // Work is done in base constructor. 
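The hunks above and below all apply the same refactor: a pipeline stage no longer captures a CancellationToken in its constructor and no longer exposes SetCancellationToken; the token is supplied on every MoveNextAsync(ITrace, CancellationToken) call instead. A minimal standalone sketch of that shape follows; the interface and class here are simplified stand-ins so the snippet compiles on its own, not the actual Microsoft.Azure.Cosmos internals.

using System;
using System.Threading;
using System.Threading.Tasks;

// Stand-in for the SDK's ITrace, declared only so the sketch is self-contained.
public interface ITrace { }

// The enumerator contract after the refactor: the trace and the token travel with each call.
public interface ITracingAsyncEnumerator<T>
{
    T Current { get; }

    ValueTask<bool> MoveNextAsync(ITrace trace, CancellationToken cancellationToken);

    ValueTask DisposeAsync();
}

// A pass-through stage: it holds no token of its own and simply forwards the one it is given.
public sealed class PassThroughStage<T> : ITracingAsyncEnumerator<T>
{
    private readonly ITracingAsyncEnumerator<T> inputStage;

    public PassThroughStage(ITracingAsyncEnumerator<T> inputStage)
    {
        this.inputStage = inputStage ?? throw new ArgumentNullException(nameof(inputStage));
    }

    public T Current => this.inputStage.Current;

    public ValueTask DisposeAsync() => this.inputStage.DisposeAsync();

    public async ValueTask<bool> MoveNextAsync(ITrace trace, CancellationToken cancellationToken)
    {
        // Honor cancellation per call rather than reading a token stored at construction time.
        cancellationToken.ThrowIfCancellationRequested();
        return await this.inputStage.MoveNextAsync(trace, cancellationToken);
    }
}

Callers such as QueryIterator.ReadNextAsync simply forward the token they already receive, which is what lets the mutable cancellationToken field and SetCancellationToken be deleted from every stage.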
} @@ -32,7 +31,6 @@ private ClientSkipQueryPipelineStage( public static TryCatch MonadicCreate( int offsetCount, CosmosElement continuationToken, - CancellationToken cancellationToken, MonadicCreatePipelineStage monadicCreatePipelineStage) { if (monadicCreatePipelineStage == null) @@ -81,7 +79,7 @@ public static TryCatch MonadicCreate( sourceToken = null; } - TryCatch tryCreateSource = monadicCreatePipelineStage(sourceToken, cancellationToken); + TryCatch tryCreateSource = monadicCreatePipelineStage(sourceToken); if (tryCreateSource.Failed) { return tryCreateSource; @@ -89,22 +87,21 @@ public static TryCatch MonadicCreate( IQueryPipelineStage stage = new ClientSkipQueryPipelineStage( tryCreateSource.Result, - cancellationToken, offsetContinuationToken.Offset); return TryCatch.FromResult(stage); } - public override async ValueTask MoveNextAsync(ITrace trace) + public override async ValueTask MoveNextAsync(ITrace trace, CancellationToken cancellationToken) { - this.cancellationToken.ThrowIfCancellationRequested(); + cancellationToken.ThrowIfCancellationRequested(); if (trace == null) { throw new ArgumentNullException(nameof(trace)); } - if (!await this.inputStage.MoveNextAsync(trace)) + if (!await this.inputStage.MoveNextAsync(trace, cancellationToken)) { this.Current = default; return false; @@ -142,12 +139,12 @@ public override async ValueTask MoveNextAsync(ITrace trace) documents: documentsAfterSkip, requestCharge: sourcePage.RequestCharge, activityId: sourcePage.ActivityId, - responseLengthInBytes: sourcePage.ResponseLengthInBytes, cosmosQueryExecutionInfo: sourcePage.CosmosQueryExecutionInfo, distributionPlanSpec: default, disallowContinuationTokenMessage: sourcePage.DisallowContinuationTokenMessage, additionalHeaders: sourcePage.AdditionalHeaders, - state: state); + state: state, + streaming: sourcePage.Streaming); this.Current = TryCatch.FromResult(queryPage); return true; diff --git a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/Skip/SkipQueryPipelineStage.Compute.cs b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/Skip/SkipQueryPipelineStage.Compute.cs index a7cd0ddea2..8acb424510 100644 --- a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/Skip/SkipQueryPipelineStage.Compute.cs +++ b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/Skip/SkipQueryPipelineStage.Compute.cs @@ -20,8 +20,8 @@ internal abstract partial class SkipQueryPipelineStage : QueryPipelineStageBase { private sealed class ComputeSkipQueryPipelineStage : SkipQueryPipelineStage { - private ComputeSkipQueryPipelineStage(IQueryPipelineStage source, CancellationToken cancellationToken, long skipCount) - : base(source, cancellationToken, skipCount) + private ComputeSkipQueryPipelineStage(IQueryPipelineStage source, long skipCount) + : base(source, skipCount) { // Work is done in base constructor. 
} @@ -29,7 +29,6 @@ private ComputeSkipQueryPipelineStage(IQueryPipelineStage source, CancellationTo public static TryCatch MonadicCreate( int offsetCount, CosmosElement continuationToken, - CancellationToken cancellationToken, MonadicCreatePipelineStage monadicCreatePipelineStage) { if (monadicCreatePipelineStage == null) @@ -61,7 +60,7 @@ public static TryCatch MonadicCreate( "offset count in continuation token can not be greater than the offsetcount in the query.")); } - TryCatch tryCreateSource = monadicCreatePipelineStage(offsetContinuationToken.SourceToken, cancellationToken); + TryCatch tryCreateSource = monadicCreatePipelineStage(offsetContinuationToken.SourceToken); if (tryCreateSource.Failed) { return tryCreateSource; @@ -69,22 +68,21 @@ public static TryCatch MonadicCreate( IQueryPipelineStage stage = new ComputeSkipQueryPipelineStage( tryCreateSource.Result, - cancellationToken, offsetContinuationToken.Offset); return TryCatch.FromResult(stage); } - public override async ValueTask MoveNextAsync(ITrace trace) + public override async ValueTask MoveNextAsync(ITrace trace, CancellationToken cancellationToken) { - this.cancellationToken.ThrowIfCancellationRequested(); + cancellationToken.ThrowIfCancellationRequested(); if (trace == null) { throw new ArgumentNullException(nameof(trace)); } - if (!await this.inputStage.MoveNextAsync(trace)) + if (!await this.inputStage.MoveNextAsync(trace, cancellationToken)) { this.Current = default; return false; @@ -123,12 +121,12 @@ public override async ValueTask MoveNextAsync(ITrace trace) documents: documentsAfterSkip, requestCharge: sourcePage.RequestCharge, activityId: sourcePage.ActivityId, - responseLengthInBytes: sourcePage.ResponseLengthInBytes, cosmosQueryExecutionInfo: sourcePage.CosmosQueryExecutionInfo, distributionPlanSpec: default, disallowContinuationTokenMessage: sourcePage.DisallowContinuationTokenMessage, additionalHeaders: sourcePage.AdditionalHeaders, - state: state); + state: state, + streaming: sourcePage.Streaming); this.Current = TryCatch.FromResult(queryPage); return true; diff --git a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/Skip/SkipQueryPipelineStage.cs b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/Skip/SkipQueryPipelineStage.cs index ef4d279ecf..adf84a9fb6 100644 --- a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/Skip/SkipQueryPipelineStage.cs +++ b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/Skip/SkipQueryPipelineStage.cs @@ -19,9 +19,8 @@ internal abstract partial class SkipQueryPipelineStage : QueryPipelineStageBase protected SkipQueryPipelineStage( IQueryPipelineStage source, - CancellationToken cancellationToken, long skipCount) - : base(source, cancellationToken) + : base(source) { if (skipCount > int.MaxValue) { @@ -35,7 +34,6 @@ public static TryCatch MonadicCreate( ExecutionEnvironment executionEnvironment, int offsetCount, CosmosElement continuationToken, - CancellationToken cancellationToken, MonadicCreatePipelineStage monadicCreatePipelineStage) { TryCatch tryCreate = executionEnvironment switch @@ -43,12 +41,10 @@ public static TryCatch MonadicCreate( ExecutionEnvironment.Client => ClientSkipQueryPipelineStage.MonadicCreate( offsetCount, continuationToken, - cancellationToken, monadicCreatePipelineStage), ExecutionEnvironment.Compute => ComputeSkipQueryPipelineStage.MonadicCreate( offsetCount, continuationToken, - cancellationToken, monadicCreatePipelineStage), _ => throw new ArgumentException($"Unknown {nameof(ExecutionEnvironment)}: {executionEnvironment}"), }; diff --git 
a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/SkipEmptyPageQueryPipelineStage.cs b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/SkipEmptyPageQueryPipelineStage.cs index 95be4d4003..adb592d436 100644 --- a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/SkipEmptyPageQueryPipelineStage.cs +++ b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/SkipEmptyPageQueryPipelineStage.cs @@ -6,13 +6,13 @@ namespace Microsoft.Azure.Cosmos.Query.Core.Pipeline { using System; using System.Collections.Generic; - using System.Collections.Immutable; using System.Threading; using System.Threading.Tasks; using Microsoft.Azure.Cosmos.CosmosElements; using Microsoft.Azure.Cosmos.Query.Core.Monads; using Microsoft.Azure.Cosmos.Query.Core.Pipeline.Pagination; using Microsoft.Azure.Cosmos.Tracing; + using static IndexUtilizationHelper; internal sealed class SkipEmptyPageQueryPipelineStage : IQueryPipelineStage { @@ -20,24 +20,21 @@ internal sealed class SkipEmptyPageQueryPipelineStage : IQueryPipelineStage private readonly IQueryPipelineStage inputStage; private double cumulativeRequestCharge; - private long cumulativeResponseLengthInBytes; private IReadOnlyDictionary cumulativeAdditionalHeaders; - private CancellationToken cancellationToken; private bool returnedFinalStats; - public SkipEmptyPageQueryPipelineStage(IQueryPipelineStage inputStage, CancellationToken cancellationToken) + public SkipEmptyPageQueryPipelineStage(IQueryPipelineStage inputStage) { this.inputStage = inputStage ?? throw new ArgumentNullException(nameof(inputStage)); - this.cancellationToken = cancellationToken; } public TryCatch Current { get; private set; } public ValueTask DisposeAsync() => this.inputStage.DisposeAsync(); - public async ValueTask MoveNextAsync(ITrace trace) + public async ValueTask MoveNextAsync(ITrace trace, CancellationToken cancellationToken) { - this.cancellationToken.ThrowIfCancellationRequested(); + cancellationToken.ThrowIfCancellationRequested(); if (trace == null) { @@ -46,7 +43,7 @@ public async ValueTask MoveNextAsync(ITrace trace) for (int documentCount = 0; documentCount == 0;) { - if (!await this.inputStage.MoveNextAsync(trace)) + if (!await this.inputStage.MoveNextAsync(trace, cancellationToken)) { if (!this.returnedFinalStats) { @@ -54,14 +51,13 @@ public async ValueTask MoveNextAsync(ITrace trace) documents: EmptyPage, requestCharge: this.cumulativeRequestCharge, activityId: Guid.Empty.ToString(), - responseLengthInBytes: this.cumulativeResponseLengthInBytes, cosmosQueryExecutionInfo: default, distributionPlanSpec: default, disallowContinuationTokenMessage: default, additionalHeaders: this.cumulativeAdditionalHeaders, - state: default); + state: default, + streaming: null); this.cumulativeRequestCharge = 0; - this.cumulativeResponseLengthInBytes = 0; this.cumulativeAdditionalHeaders = null; this.returnedFinalStats = true; this.Current = TryCatch.FromResult(queryPage); @@ -91,22 +87,24 @@ public async ValueTask MoveNextAsync(ITrace trace) documents: EmptyPage, requestCharge: sourcePage.RequestCharge + this.cumulativeRequestCharge, activityId: sourcePage.ActivityId, - responseLengthInBytes: sourcePage.ResponseLengthInBytes + this.cumulativeResponseLengthInBytes, cosmosQueryExecutionInfo: sourcePage.CosmosQueryExecutionInfo, distributionPlanSpec: default, disallowContinuationTokenMessage: sourcePage.DisallowContinuationTokenMessage, - additionalHeaders: sourcePage.AdditionalHeaders, - state: default); + additionalHeaders: AccumulateIndexUtilization( + cumulativeHeaders: this.cumulativeAdditionalHeaders, + 
currentHeaders: sourcePage.AdditionalHeaders), + state: default, + streaming: sourcePage.Streaming); this.cumulativeRequestCharge = 0; - this.cumulativeResponseLengthInBytes = 0; this.cumulativeAdditionalHeaders = null; this.Current = TryCatch.FromResult(queryPage); return true; } this.cumulativeRequestCharge += sourcePage.RequestCharge; - this.cumulativeResponseLengthInBytes += sourcePage.ResponseLengthInBytes; - this.cumulativeAdditionalHeaders = sourcePage.AdditionalHeaders; + this.cumulativeAdditionalHeaders = AccumulateIndexUtilization( + cumulativeHeaders: this.cumulativeAdditionalHeaders, + currentHeaders: sourcePage.AdditionalHeaders); } else { @@ -117,14 +115,15 @@ public async ValueTask MoveNextAsync(ITrace trace) documents: sourcePage.Documents, requestCharge: sourcePage.RequestCharge + this.cumulativeRequestCharge, activityId: sourcePage.ActivityId, - responseLengthInBytes: sourcePage.ResponseLengthInBytes + this.cumulativeResponseLengthInBytes, cosmosQueryExecutionInfo: sourcePage.CosmosQueryExecutionInfo, distributionPlanSpec: default, disallowContinuationTokenMessage: sourcePage.DisallowContinuationTokenMessage, - additionalHeaders: sourcePage.AdditionalHeaders, - state: sourcePage.State); + additionalHeaders: AccumulateIndexUtilization( + cumulativeHeaders: this.cumulativeAdditionalHeaders, + currentHeaders: sourcePage.AdditionalHeaders), + state: sourcePage.State, + streaming: sourcePage.Streaming); this.cumulativeRequestCharge = 0; - this.cumulativeResponseLengthInBytes = 0; this.cumulativeAdditionalHeaders = null; } else @@ -138,10 +137,5 @@ public async ValueTask MoveNextAsync(ITrace trace) return true; } - - public void SetCancellationToken(CancellationToken cancellationToken) - { - this.cancellationToken = cancellationToken; - } } } diff --git a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/Take/TakeQueryPipelineStage.Client.cs b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/Take/TakeQueryPipelineStage.Client.cs index 2d403ae45a..d7d0dea1b4 100644 --- a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/Take/TakeQueryPipelineStage.Client.cs +++ b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/Take/TakeQueryPipelineStage.Client.cs @@ -24,10 +24,9 @@ private sealed class ClientTakeQueryPipelineStage : TakeQueryPipelineStage private ClientTakeQueryPipelineStage( IQueryPipelineStage source, - CancellationToken cancellationToken, int takeCount, TakeEnum takeEnum) - : base(source, cancellationToken, takeCount) + : base(source, takeCount) { this.takeEnum = takeEnum; } @@ -35,7 +34,6 @@ private ClientTakeQueryPipelineStage( public static TryCatch MonadicCreateLimitStage( int limitCount, CosmosElement requestContinuationToken, - CancellationToken cancellationToken, MonadicCreatePipelineStage monadicCreatePipelineStage) { if (limitCount < 0) @@ -89,7 +87,7 @@ public static TryCatch MonadicCreateLimitStage( sourceToken = null; } - TryCatch tryCreateSource = monadicCreatePipelineStage(sourceToken, cancellationToken); + TryCatch tryCreateSource = monadicCreatePipelineStage(sourceToken); if (tryCreateSource.Failed) { return tryCreateSource; @@ -97,7 +95,6 @@ public static TryCatch MonadicCreateLimitStage( IQueryPipelineStage stage = new ClientTakeQueryPipelineStage( tryCreateSource.Result, - cancellationToken, limitContinuationToken.Limit, TakeEnum.Limit); @@ -107,7 +104,6 @@ public static TryCatch MonadicCreateLimitStage( public static TryCatch MonadicCreateTopStage( int topCount, CosmosElement requestContinuationToken, - CancellationToken cancellationToken, 
MonadicCreatePipelineStage monadicCreatePipelineStage) { if (topCount < 0) @@ -161,7 +157,7 @@ public static TryCatch MonadicCreateTopStage( sourceToken = null; } - TryCatch tryCreateSource = monadicCreatePipelineStage(sourceToken, cancellationToken); + TryCatch tryCreateSource = monadicCreatePipelineStage(sourceToken); if (tryCreateSource.Failed) { return tryCreateSource; @@ -169,23 +165,22 @@ public static TryCatch MonadicCreateTopStage( IQueryPipelineStage stage = new ClientTakeQueryPipelineStage( tryCreateSource.Result, - cancellationToken, topContinuationToken.Top, TakeEnum.Top); return TryCatch.FromResult(stage); } - public override async ValueTask MoveNextAsync(ITrace trace) + public override async ValueTask MoveNextAsync(ITrace trace, CancellationToken cancellationToken) { - this.cancellationToken.ThrowIfCancellationRequested(); + cancellationToken.ThrowIfCancellationRequested(); if (trace == null) { throw new ArgumentNullException(nameof(trace)); } - if (this.ReturnedFinalPage || !await this.inputStage.MoveNextAsync(trace)) + if (this.ReturnedFinalPage || !await this.inputStage.MoveNextAsync(trace, cancellationToken)) { this.Current = default; this.takeCount = 0; @@ -229,12 +224,12 @@ public override async ValueTask MoveNextAsync(ITrace trace) documents: takedDocuments, requestCharge: sourcePage.RequestCharge, activityId: sourcePage.ActivityId, - responseLengthInBytes: sourcePage.ResponseLengthInBytes, cosmosQueryExecutionInfo: sourcePage.CosmosQueryExecutionInfo, distributionPlanSpec: default, disallowContinuationTokenMessage: sourcePage.DisallowContinuationTokenMessage, additionalHeaders: sourcePage.AdditionalHeaders, - state: state); + state: state, + streaming: sourcePage.Streaming); this.Current = TryCatch.FromResult(queryPage); return true; diff --git a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/Take/TakeQueryPipelineStage.Compute.cs b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/Take/TakeQueryPipelineStage.Compute.cs index 7e1e8d6f62..fd6e0bd5ff 100644 --- a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/Take/TakeQueryPipelineStage.Compute.cs +++ b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/Take/TakeQueryPipelineStage.Compute.cs @@ -22,9 +22,8 @@ private sealed class ComputeTakeQueryPipelineStage : TakeQueryPipelineStage { private ComputeTakeQueryPipelineStage( IQueryPipelineStage source, - CancellationToken cancellationToken, int takeCount) - : base(source, cancellationToken, takeCount) + : base(source, takeCount) { // Work is done in the base class. 
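Stepping back to the SkipEmptyPageQueryPipelineStage hunks above: when empty pages are skipped, the request charge is summed and the index-utilization header is carried forward via the new IndexUtilizationHelper, since that header is expected to be identical across partitions and round trips. The following standalone sketch shows the merge rule on plain dictionaries; the header name is hard-coded here purely for illustration (the SDK reads it from Documents.HttpConstants.HttpHeaders.IndexUtilization) and the payload value is invented.

using System;
using System.Collections.Generic;

public static class IndexUtilizationMergeDemo
{
    // Header name assumed for this sketch; the SDK uses HttpConstants.HttpHeaders.IndexUtilization.
    private const string IndexUtilization = "x-ms-cosmos-index-utilization";

    public static IReadOnlyDictionary<string, string> Accumulate(
        IReadOnlyDictionary<string, string> cumulativeHeaders,
        IReadOnlyDictionary<string, string> currentHeaders)
    {
        if (cumulativeHeaders == null) return currentHeaders;
        if (currentHeaders == null) return cumulativeHeaders;

        // Keep the current headers untouched when they already report index utilization,
        // or when nothing useful was accumulated from the skipped pages.
        if (currentHeaders.ContainsKey(IndexUtilization) || !cumulativeHeaders.ContainsKey(IndexUtilization))
        {
            return currentHeaders;
        }

        // Otherwise copy the current headers and carry the previously seen value forward.
        Dictionary<string, string> merged = new Dictionary<string, string>();
        foreach (KeyValuePair<string, string> header in currentHeaders)
        {
            merged.Add(header.Key, header.Value);
        }

        merged.Add(IndexUtilization, cumulativeHeaders[IndexUtilization]);
        return merged;
    }

    public static void Main()
    {
        // An empty (skipped) page carried the header; the page actually returned to the caller did not.
        Dictionary<string, string> skippedPageHeaders = new Dictionary<string, string> { [IndexUtilization] = "ZmFrZS1wYXlsb2Fk" }; // invented value
        Dictionary<string, string> returnedPageHeaders = new Dictionary<string, string> { ["x-ms-activity-id"] = Guid.NewGuid().ToString() };

        IReadOnlyDictionary<string, string> merged = Accumulate(skippedPageHeaders, returnedPageHeaders);
        Console.WriteLine(merged[IndexUtilization]); // the skipped page's value is carried forward
    }
}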
} @@ -32,27 +31,22 @@ private ComputeTakeQueryPipelineStage( public static TryCatch MonadicCreateLimitStage( int takeCount, CosmosElement requestContinuationToken, - CancellationToken cancellationToken, MonadicCreatePipelineStage monadicCreatePipelineStage) => ComputeTakeQueryPipelineStage.MonadicCreate( takeCount, requestContinuationToken, - cancellationToken, monadicCreatePipelineStage); public static TryCatch MonadicCreateTopStage( int takeCount, CosmosElement requestContinuationToken, - CancellationToken cancellationToken, MonadicCreatePipelineStage monadicCreatePipelineStage) => ComputeTakeQueryPipelineStage.MonadicCreate( takeCount, requestContinuationToken, - cancellationToken, monadicCreatePipelineStage); private static TryCatch MonadicCreate( int takeCount, CosmosElement requestContinuationToken, - CancellationToken cancellationToken, MonadicCreatePipelineStage monadicCreatePipelineStage) { if (takeCount < 0) @@ -87,7 +81,7 @@ private static TryCatch MonadicCreate( $"{nameof(TakeContinuationToken.TakeCount)} in {nameof(TakeContinuationToken)}: {requestContinuationToken}: {takeContinuationToken.TakeCount} can not be greater than the limit count in the query: {takeCount}.")); } - TryCatch tryCreateSource = monadicCreatePipelineStage(takeContinuationToken.SourceToken, cancellationToken); + TryCatch tryCreateSource = monadicCreatePipelineStage(takeContinuationToken.SourceToken); if (tryCreateSource.Failed) { return tryCreateSource; @@ -95,22 +89,21 @@ private static TryCatch MonadicCreate( IQueryPipelineStage stage = new ComputeTakeQueryPipelineStage( tryCreateSource.Result, - cancellationToken, takeContinuationToken.TakeCount); return TryCatch.FromResult(stage); } - public override async ValueTask MoveNextAsync(ITrace trace) + public override async ValueTask MoveNextAsync(ITrace trace, CancellationToken cancellationToken) { - this.cancellationToken.ThrowIfCancellationRequested(); + cancellationToken.ThrowIfCancellationRequested(); if (trace == null) { throw new ArgumentNullException(nameof(trace)); } - if (this.ReturnedFinalPage || !await this.inputStage.MoveNextAsync(trace)) + if (this.ReturnedFinalPage || !await this.inputStage.MoveNextAsync(trace, cancellationToken)) { this.Current = default; this.takeCount = 0; @@ -146,12 +139,12 @@ public override async ValueTask MoveNextAsync(ITrace trace) documents: takedDocuments, requestCharge: sourcePage.RequestCharge, activityId: sourcePage.ActivityId, - responseLengthInBytes: sourcePage.ResponseLengthInBytes, cosmosQueryExecutionInfo: sourcePage.CosmosQueryExecutionInfo, distributionPlanSpec: default, disallowContinuationTokenMessage: sourcePage.DisallowContinuationTokenMessage, additionalHeaders: sourcePage.AdditionalHeaders, - state: queryState); + state: queryState, + streaming: sourcePage.Streaming); this.Current = TryCatch.FromResult(queryPage); return true; diff --git a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/Take/TakeQueryPipelineStage.cs b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/Take/TakeQueryPipelineStage.cs index d2bb5c4567..5dea48e840 100644 --- a/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/Take/TakeQueryPipelineStage.cs +++ b/Microsoft.Azure.Cosmos/src/Query/Core/Pipeline/Take/TakeQueryPipelineStage.cs @@ -17,9 +17,8 @@ internal abstract partial class TakeQueryPipelineStage : QueryPipelineStageBase protected TakeQueryPipelineStage( IQueryPipelineStage source, - CancellationToken cancellationToken, int takeCount) - : base(source, cancellationToken) + : base(source) { this.takeCount = takeCount; } @@ -28,18 
+27,15 @@ public static TryCatch MonadicCreateLimitStage( ExecutionEnvironment executionEnvironment, int limitCount, CosmosElement requestContinuationToken, - CancellationToken cancellationToken, MonadicCreatePipelineStage monadicCreatePipelineStage) => executionEnvironment switch { ExecutionEnvironment.Client => ClientTakeQueryPipelineStage.MonadicCreateLimitStage( limitCount, requestContinuationToken, - cancellationToken, monadicCreatePipelineStage), ExecutionEnvironment.Compute => ComputeTakeQueryPipelineStage.MonadicCreateLimitStage( limitCount, requestContinuationToken, - cancellationToken, monadicCreatePipelineStage), _ => throw new ArgumentOutOfRangeException($"Unknown {nameof(ExecutionEnvironment)}: {executionEnvironment}."), }; @@ -48,18 +44,15 @@ public static TryCatch MonadicCreateTopStage( ExecutionEnvironment executionEnvironment, int limitCount, CosmosElement requestContinuationToken, - CancellationToken cancellationToken, MonadicCreatePipelineStage monadicCreatePipelineStage) => executionEnvironment switch { ExecutionEnvironment.Client => ClientTakeQueryPipelineStage.MonadicCreateTopStage( limitCount, requestContinuationToken, - cancellationToken, monadicCreatePipelineStage), ExecutionEnvironment.Compute => ComputeTakeQueryPipelineStage.MonadicCreateTopStage( limitCount, requestContinuationToken, - cancellationToken, monadicCreatePipelineStage), _ => throw new ArgumentOutOfRangeException($"Unknown {nameof(ExecutionEnvironment)}: {executionEnvironment}."), }; diff --git a/Microsoft.Azure.Cosmos/src/Query/v3Query/CosmosQueryClientCore.cs b/Microsoft.Azure.Cosmos/src/Query/v3Query/CosmosQueryClientCore.cs index e1c23c156b..f4817997ab 100644 --- a/Microsoft.Azure.Cosmos/src/Query/v3Query/CosmosQueryClientCore.cs +++ b/Microsoft.Azure.Cosmos/src/Query/v3Query/CosmosQueryClientCore.cs @@ -330,12 +330,12 @@ private static TryCatch GetCosmosElementResponse( cosmosResponseMessage.Content.CopyTo(memoryStream); } - long responseLengthBytes = memoryStream.Length; CosmosQueryClientCore.ParseRestStream( memoryStream, resourceType, out CosmosArray documents, - out CosmosObject distributionPlan); + out CosmosObject distributionPlan, + out bool? streaming); DistributionPlanSpec distributionPlanSpec = null; @@ -384,12 +384,12 @@ private static TryCatch GetCosmosElementResponse( documents, cosmosResponseMessage.Headers.RequestCharge, cosmosResponseMessage.Headers.ActivityId, - responseLengthBytes, cosmosQueryExecutionInfo, distributionPlanSpec, disallowContinuationTokenMessage: null, additionalHeaders, - queryState); + queryState, + streaming); return TryCatch.FromResult(response); } @@ -460,12 +460,14 @@ private Task GetRoutingMapProviderAsync() /// The memory stream response for the query REST response Azure Cosmos /// The resource type /// An array of CosmosElements parsed from the response body - /// An object containing the distribution plan for the client + /// An object containing the distribution plan for the client + /// An optional return value indicating if the backend response is streaming public static void ParseRestStream( Stream stream, ResourceType resourceType, out CosmosArray documents, - out CosmosObject distributionPlan) + out CosmosObject distributionPlan, + out bool? streaming) { if (!(stream is MemoryStream memoryStream)) { @@ -507,7 +509,8 @@ public static void ParseRestStream( // "Name": "root" // } // } - // } + // }, + // "_streaming": true // } // You want to create a CosmosElement for each document in "Documents". 
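The next hunk reads the optional "_streaming" boolean documented in the response-body contract comment above. A minimal standalone sketch of the same rule, written against System.Text.Json purely for illustration (the SDK parses with its own JSON navigator): the flag is null when absent, the boolean value when present, and anything else is a contract violation.

using System;
using System.Text.Json;

public static class StreamingFlagParser
{
    public static bool? ReadStreamingFlag(string responseBody)
    {
        using JsonDocument document = JsonDocument.Parse(responseBody);

        // Absent property: the backend did not report whether the response was streamed.
        if (!document.RootElement.TryGetProperty("_streaming", out JsonElement streamingElement))
        {
            return null;
        }

        return streamingElement.ValueKind switch
        {
            JsonValueKind.True => true,
            JsonValueKind.False => false,
            _ => throw new InvalidOperationException(
                $"Response Body Contract was violated. QueryResponse had _streaming property as a non boolean: {streamingElement.ValueKind}"),
        };
    }
}

For example, ReadStreamingFlag("{\"_count\": 0, \"Documents\": [], \"_streaming\": true}") returns true, while a body without the property returns null; that nullable value is what flows into QueryPage.Streaming.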
@@ -557,7 +560,22 @@ public static void ParseRestStream( else { distributionPlan = null; - } + } + + if (resourceType == ResourceType.Document && jsonNavigator.TryGetObjectProperty(jsonNavigator.GetRootNode(), "_streaming", out ObjectProperty streamingProperty)) + { + JsonNodeType jsonNodeType = jsonNavigator.GetNodeType(streamingProperty.ValueNode); + streaming = jsonNodeType switch + { + JsonNodeType.False => false, + JsonNodeType.True => true, + _ => throw new InvalidOperationException($"Response Body Contract was violated. QueryResponse had _streaming property as a non boolean: {jsonNodeType}"), + }; + } + else + { + streaming = null; + } } } } \ No newline at end of file diff --git a/Microsoft.Azure.Cosmos/src/Query/v3Query/QueryIterator.cs b/Microsoft.Azure.Cosmos/src/Query/v3Query/QueryIterator.cs index 344e3e56e4..fe540f222e 100644 --- a/Microsoft.Azure.Cosmos/src/Query/v3Query/QueryIterator.cs +++ b/Microsoft.Azure.Cosmos/src/Query/v3Query/QueryIterator.cs @@ -192,14 +192,12 @@ public override async Task ReadNextAsync(ITrace trace, Cancella try { // This catches exception thrown by the pipeline and converts it to QueryResponse - this.queryPipelineStage.SetCancellationToken(cancellationToken); - if (!await this.queryPipelineStage.MoveNextAsync(trace)) + if (!await this.queryPipelineStage.MoveNextAsync(trace, cancellationToken)) { this.hasMoreResults = false; return QueryResponse.CreateSuccess( result: EmptyPage, count: EmptyPage.Count, - responseLengthBytes: default, serializationOptions: this.cosmosSerializationFormatOptions, responseHeaders: new CosmosQueryResponseMessageHeaders( continauationToken: default, @@ -248,7 +246,6 @@ public override async Task ReadNextAsync(ITrace trace, Cancella return QueryResponse.CreateSuccess( result: tryGetQueryPage.Result.Documents, count: tryGetQueryPage.Result.Documents.Count, - responseLengthBytes: tryGetQueryPage.Result.ResponseLengthInBytes, serializationOptions: this.cosmosSerializationFormatOptions, responseHeaders: headers, trace: trace); diff --git a/Microsoft.Azure.Cosmos/src/Query/v3Query/QueryResponse.cs b/Microsoft.Azure.Cosmos/src/Query/v3Query/QueryResponse.cs index 49f9e40232..898a628088 100644 --- a/Microsoft.Azure.Cosmos/src/Query/v3Query/QueryResponse.cs +++ b/Microsoft.Azure.Cosmos/src/Query/v3Query/QueryResponse.cs @@ -38,7 +38,6 @@ internal QueryResponse() private QueryResponse( IReadOnlyList result, int count, - long responseLengthBytes, CosmosQueryResponseMessageHeaders responseHeaders, HttpStatusCode statusCode, RequestMessage requestMessage, @@ -55,7 +54,6 @@ private QueryResponse( { this.CosmosElements = result; this.Count = count; - this.ResponseLengthBytes = responseLengthBytes; this.memoryStream = memoryStream; this.CosmosSerializationOptions = serializationOptions; } @@ -68,14 +66,6 @@ private QueryResponse( internal virtual CosmosQueryResponseMessageHeaders QueryHeaders => (CosmosQueryResponseMessageHeaders)this.Headers; - /// - /// Gets the response length in bytes - /// - /// - /// This value is only set for Direct mode. 
- /// - internal long ResponseLengthBytes { get; } - internal virtual CosmosSerializationFormatOptions CosmosSerializationOptions { get; } internal bool GetHasMoreResults() @@ -86,7 +76,6 @@ internal bool GetHasMoreResults() internal static QueryResponse CreateSuccess( IReadOnlyList result, int count, - long responseLengthBytes, CosmosQueryResponseMessageHeaders responseHeaders, CosmosSerializationFormatOptions serializationOptions, ITrace trace) @@ -96,11 +85,6 @@ internal static QueryResponse CreateSuccess( throw new ArgumentOutOfRangeException("count must be positive"); } - if (responseLengthBytes < 0) - { - throw new ArgumentOutOfRangeException("responseLengthBytes must be positive"); - } - Lazy memoryStream = new Lazy(() => CosmosElementSerializer.ToStream( responseHeaders.ContainerRid, result, @@ -110,7 +94,6 @@ internal static QueryResponse CreateSuccess( QueryResponse cosmosQueryResponse = new QueryResponse( result: result, count: count, - responseLengthBytes: responseLengthBytes, responseHeaders: responseHeaders, statusCode: HttpStatusCode.OK, cosmosException: null, @@ -132,7 +115,6 @@ internal static QueryResponse CreateFailure( QueryResponse cosmosQueryResponse = new QueryResponse( result: new List(), count: 0, - responseLengthBytes: 0, responseHeaders: responseHeaders, statusCode: statusCode, cosmosException: cosmosException, diff --git a/Microsoft.Azure.Cosmos/src/ReadFeed/Pagination/CrossPartitionReadFeedAsyncEnumerator.cs b/Microsoft.Azure.Cosmos/src/ReadFeed/Pagination/CrossPartitionReadFeedAsyncEnumerator.cs index 5641318ec3..709b410de2 100644 --- a/Microsoft.Azure.Cosmos/src/ReadFeed/Pagination/CrossPartitionReadFeedAsyncEnumerator.cs +++ b/Microsoft.Azure.Cosmos/src/ReadFeed/Pagination/CrossPartitionReadFeedAsyncEnumerator.cs @@ -14,7 +14,7 @@ namespace Microsoft.Azure.Cosmos.ReadFeed.Pagination using Microsoft.Azure.Cosmos.Tracing; using Microsoft.Azure.Documents; - internal sealed class CrossPartitionReadFeedAsyncEnumerator : IAsyncEnumerator>> + internal sealed class CrossPartitionReadFeedAsyncEnumerator : ITracingAsyncEnumerator>> { private readonly CrossPartitionRangePageAsyncEnumerator crossPartitionEnumerator; @@ -26,12 +26,7 @@ private CrossPartitionReadFeedAsyncEnumerator( public TryCatch> Current { get; set; } - public ValueTask MoveNextAsync() - { - return this.MoveNextAsync(NoOpTrace.Singleton); - } - - public async ValueTask MoveNextAsync(ITrace trace) + public async ValueTask MoveNextAsync(ITrace trace, CancellationToken cancellationToken) { if (trace == null) { @@ -40,7 +35,7 @@ public async ValueTask MoveNextAsync(ITrace trace) using (ITrace moveNextAsyncTrace = trace.StartChild(name: nameof(MoveNextAsync), component: TraceComponent.ReadFeed, level: TraceLevel.Info)) { - if (!await this.crossPartitionEnumerator.MoveNextAsync(moveNextAsyncTrace)) + if (!await this.crossPartitionEnumerator.MoveNextAsync(moveNextAsyncTrace, cancellationToken)) { this.Current = default; return false; @@ -61,16 +56,10 @@ public async ValueTask MoveNextAsync(ITrace trace) public ValueTask DisposeAsync() => this.crossPartitionEnumerator.DisposeAsync(); - public void SetCancellationToken(CancellationToken cancellationToken) - { - this.crossPartitionEnumerator.SetCancellationToken(cancellationToken); - } - public static CrossPartitionReadFeedAsyncEnumerator Create( IDocumentContainer documentContainer, CrossFeedRangeState crossFeedRangeState, - ReadFeedPaginationOptions readFeedPaginationOptions, - CancellationToken cancellationToken) + ReadFeedPaginationOptions 
readFeedPaginationOptions) { if (documentContainer == null) { @@ -100,12 +89,10 @@ public static CrossPartitionReadFeedAsyncEnumerator Create( documentContainer, CrossPartitionReadFeedAsyncEnumerator.MakeCreateFunction( documentContainer, - readFeedPaginationOptions, - cancellationToken), + readFeedPaginationOptions), comparer: comparer, maxConcurrency: default, prefetchPolicy: PrefetchPolicy.PrefetchSinglePage, - cancellationToken, crossFeedRangeState); CrossPartitionReadFeedAsyncEnumerator enumerator = new CrossPartitionReadFeedAsyncEnumerator( @@ -116,14 +103,12 @@ public static CrossPartitionReadFeedAsyncEnumerator Create( private static CreatePartitionRangePageAsyncEnumerator MakeCreateFunction( IReadFeedDataSource readFeedDataSource, - ReadFeedPaginationOptions readFeedPaginationOptions, - CancellationToken cancellationToken) + ReadFeedPaginationOptions readFeedPaginationOptions) { return (FeedRangeState feedRangeState) => new ReadFeedPartitionRangeEnumerator( readFeedDataSource, feedRangeState, - readFeedPaginationOptions, - cancellationToken); + readFeedPaginationOptions); } private sealed class PartitionRangePageAsyncEnumeratorComparerForward : IComparer> diff --git a/Microsoft.Azure.Cosmos/src/ReadFeed/Pagination/ReadFeedPage.cs b/Microsoft.Azure.Cosmos/src/ReadFeed/Pagination/ReadFeedPage.cs index 391cfcc145..b7528b62d3 100644 --- a/Microsoft.Azure.Cosmos/src/ReadFeed/Pagination/ReadFeedPage.cs +++ b/Microsoft.Azure.Cosmos/src/ReadFeed/Pagination/ReadFeedPage.cs @@ -22,17 +22,21 @@ internal sealed class ReadFeedPage : Page public ReadFeedPage( Stream content, - double requestCharge, + double requestCharge, + int itemCount, string activityId, IReadOnlyDictionary additionalHeaders, ReadFeedState state) : base(requestCharge, activityId, additionalHeaders, state) { - this.Content = content ?? throw new ArgumentNullException(nameof(content)); + this.Content = content ?? throw new ArgumentNullException(nameof(content)); + this.ItemCount = itemCount; } - public Stream Content { get; } - + public Stream Content { get; } + + public override int ItemCount { get; } + protected override ImmutableHashSet DerivedClassBannedHeaders => BannedHeaders; } } diff --git a/Microsoft.Azure.Cosmos/src/ReadFeed/Pagination/ReadFeedPartitionRangeEnumerator.cs b/Microsoft.Azure.Cosmos/src/ReadFeed/Pagination/ReadFeedPartitionRangeEnumerator.cs index c2969f6356..6fc27dea80 100644 --- a/Microsoft.Azure.Cosmos/src/ReadFeed/Pagination/ReadFeedPartitionRangeEnumerator.cs +++ b/Microsoft.Azure.Cosmos/src/ReadFeed/Pagination/ReadFeedPartitionRangeEnumerator.cs @@ -20,9 +20,8 @@ internal sealed class ReadFeedPartitionRangeEnumerator : PartitionRangePageAsync public ReadFeedPartitionRangeEnumerator( IReadFeedDataSource readFeedDataSource, FeedRangeState feedRangeState, - ReadFeedPaginationOptions readFeedPaginationOptions, - CancellationToken cancellationToken) - : base(feedRangeState, cancellationToken) + ReadFeedPaginationOptions readFeedPaginationOptions) + : base(feedRangeState) { this.readFeedDataSource = readFeedDataSource ?? 
throw new ArgumentNullException(nameof(readFeedDataSource)); this.readFeedPaginationOptions = readFeedPaginationOptions; diff --git a/Microsoft.Azure.Cosmos/src/ReadFeed/ReadFeedCrossFeedRangeAsyncEnumerable.cs b/Microsoft.Azure.Cosmos/src/ReadFeed/ReadFeedCrossFeedRangeAsyncEnumerable.cs index d581e742b5..a1c7ff927c 100644 --- a/Microsoft.Azure.Cosmos/src/ReadFeed/ReadFeedCrossFeedRangeAsyncEnumerable.cs +++ b/Microsoft.Azure.Cosmos/src/ReadFeed/ReadFeedCrossFeedRangeAsyncEnumerable.cs @@ -7,9 +7,11 @@ namespace Microsoft.Azure.Cosmos.ReadFeed using System; using System.Collections.Generic; using System.Threading; + using System.Threading.Tasks; using Microsoft.Azure.Cosmos.Pagination; using Microsoft.Azure.Cosmos.Query.Core.Monads; using Microsoft.Azure.Cosmos.ReadFeed.Pagination; + using Microsoft.Azure.Cosmos.Tracing; internal sealed class ReadFeedCrossFeedRangeAsyncEnumerable : IAsyncEnumerable> { @@ -33,10 +35,10 @@ public IAsyncEnumerator> GetAsyncEnumerator(CancellationT CrossPartitionReadFeedAsyncEnumerator innerEnumerator = CrossPartitionReadFeedAsyncEnumerator.Create( this.documentContainer, innerState, - this.readFeedPaginationOptions, - cancellationToken); + this.readFeedPaginationOptions); - return new ReadFeedCrossFeedRangeAsyncEnumerator(innerEnumerator); + ReadFeedCrossFeedRangeAsyncEnumerator readFeedEnumerator = new ReadFeedCrossFeedRangeAsyncEnumerator(innerEnumerator); + return new TracingAsyncEnumerator>(readFeedEnumerator, NoOpTrace.Singleton, cancellationToken); } } } diff --git a/Microsoft.Azure.Cosmos/src/ReadFeed/ReadFeedCrossFeedRangeAsyncEnumerator.cs b/Microsoft.Azure.Cosmos/src/ReadFeed/ReadFeedCrossFeedRangeAsyncEnumerator.cs index 996b1eaf78..2f6780cdc9 100644 --- a/Microsoft.Azure.Cosmos/src/ReadFeed/ReadFeedCrossFeedRangeAsyncEnumerator.cs +++ b/Microsoft.Azure.Cosmos/src/ReadFeed/ReadFeedCrossFeedRangeAsyncEnumerator.cs @@ -5,14 +5,16 @@ namespace Microsoft.Azure.Cosmos.ReadFeed { using System; - using System.Collections.Generic; + using System.Collections.Generic; + using System.Threading; using System.Threading.Tasks; using Microsoft.Azure.Cosmos.CosmosElements; using Microsoft.Azure.Cosmos.Pagination; using Microsoft.Azure.Cosmos.Query.Core.Monads; - using Microsoft.Azure.Cosmos.ReadFeed.Pagination; - - internal sealed class ReadFeedCrossFeedRangeAsyncEnumerator : IAsyncEnumerator> + using Microsoft.Azure.Cosmos.ReadFeed.Pagination; + using Microsoft.Azure.Cosmos.Tracing; + + internal sealed class ReadFeedCrossFeedRangeAsyncEnumerator : ITracingAsyncEnumerator> { private readonly CrossPartitionReadFeedAsyncEnumerator enumerator; @@ -25,9 +27,9 @@ public ReadFeedCrossFeedRangeAsyncEnumerator(CrossPartitionReadFeedAsyncEnumerat public ValueTask DisposeAsync() => this.enumerator.DisposeAsync(); - public async ValueTask MoveNextAsync() + public async ValueTask MoveNextAsync(ITrace trace, CancellationToken cancellationToken) { - if (!await this.enumerator.MoveNextAsync()) + if (!await this.enumerator.MoveNextAsync(trace, cancellationToken)) { return false; } @@ -47,7 +49,8 @@ public async ValueTask MoveNextAsync() innerReadFeedPage.Page.Content, Documents.ResourceType.Document, out CosmosArray documents, - out CosmosObject distributionPlan); + out CosmosObject distributionPlan, + out bool? 
ignored); ReadFeedPage page = new ReadFeedPage( documents, innerReadFeedPage.Page.RequestCharge, diff --git a/Microsoft.Azure.Cosmos/src/ReadFeed/ReadFeedIteratorCore.cs b/Microsoft.Azure.Cosmos/src/ReadFeed/ReadFeedIteratorCore.cs index 8a46196a56..1e997d0b8c 100644 --- a/Microsoft.Azure.Cosmos/src/ReadFeed/ReadFeedIteratorCore.cs +++ b/Microsoft.Azure.Cosmos/src/ReadFeed/ReadFeedIteratorCore.cs @@ -181,8 +181,7 @@ public ReadFeedIteratorCore( CrossPartitionReadFeedAsyncEnumerator.Create( documentContainer, new CrossFeedRangeState(monadicReadFeedState.Result.FeedRangeStates), - readFeedPaginationOptions, - cancellationToken)); + readFeedPaginationOptions)); } this.hasMoreResults = true; @@ -232,12 +231,11 @@ public override async Task ReadNextAsync( } CrossPartitionReadFeedAsyncEnumerator enumerator = this.monadicEnumerator.Result; - enumerator.SetCancellationToken(cancellationToken); TryCatch> monadicPage; try { - if (!await enumerator.MoveNextAsync(trace)) + if (!await enumerator.MoveNextAsync(trace, cancellationToken)) { throw new InvalidOperationException("Should not be calling enumerator that does not have any more results"); } diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/IndexMetricsParserBaselineTest.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/IndexMetricsParserBaselineTest.cs index 43d6fad184..d54fa8cd67 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/IndexMetricsParserBaselineTest.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/IndexMetricsParserBaselineTest.cs @@ -376,73 +376,57 @@ public void IndexUtilizationClientSideExistenceTest() public override IndexMetricsParserTestOutput ExecuteTest(IndexMetricsParserTestInput input) { - string indexMetricsNonODE = default; // Execute without ODE - QueryRequestOptions requestOptions = new QueryRequestOptions() { PopulateIndexMetrics = true, EnableOptimisticDirectExecution = false }; + string indexMetricsNonODE = RunTest(input.Query, enableOptimisticDirectExecution: false); - FeedIterator itemQuery = testContainer.GetItemQueryIterator( - input.Query, + // Execute with ODE + string indexMetricsODE = RunTest(input.Query, enableOptimisticDirectExecution: true); + + // Make sure ODE and non-ODE is consistent + Assert.AreEqual(indexMetricsNonODE, indexMetricsODE); + + return new IndexMetricsParserTestOutput(indexMetricsNonODE); + } + + private static string RunTest(string query, bool enableOptimisticDirectExecution) + { + QueryRequestOptions requestOptions = new QueryRequestOptions() { PopulateIndexMetrics = true, EnableOptimisticDirectExecution = enableOptimisticDirectExecution }; + + using FeedIterator itemQuery = testContainer.GetItemQueryIterator( + query, requestOptions: requestOptions); // Index Metrics is returned fully on the first page so no need to worry about result set - int roundTripCount = 1; + int roundTripCount = 0; + string indexMetrics = null; while (itemQuery.HasMoreResults) { FeedResponse page = itemQuery.ReadNextAsync().Result; Assert.IsTrue(page.Headers.AllKeys().Length > 1); - if (roundTripCount > 1) + if (roundTripCount > 0) { - if (page.IndexMetrics != null) Assert.Fail("Expected only Index Metrics on first round trip. Current round trip %n", roundTripCount); + if (page.IndexMetrics != null) + { + Assert.Fail("Expected only Index Metrics on first round trip. 
Current round trip %n", roundTripCount); + } } else { Assert.IsNotNull(page.Headers.Get(HttpConstants.HttpHeaders.IndexUtilization), "Expected index utilization headers for query"); Assert.IsNotNull(page.IndexMetrics, "Expected index metrics response for query"); - indexMetricsNonODE = page.IndexMetrics; + indexMetrics = page.IndexMetrics; } roundTripCount++; } - // Execute with ODE - string indexMetricsODE = default; - QueryRequestOptions requestOptions2 = new QueryRequestOptions() { PopulateIndexMetrics = true, EnableOptimisticDirectExecution = true }; - - FeedIterator itemQuery2 = testContainer.GetItemQueryIterator( - input.Query, - requestOptions: requestOptions2); - - // Index Metrics is returned fully on the first page so no need to worry about result set - int roundTripCount2 = 1; - while (itemQuery2.HasMoreResults) - { - FeedResponse page2 = itemQuery2.ReadNextAsync().Result; - Assert.IsTrue(page2.Headers.AllKeys().Length > 1); - - if (roundTripCount2 > 1) - { - if (page2.IndexMetrics != null) Assert.Fail("Expected only Index Metrics on first round trip. Current round trip %n", roundTripCount2); - } - else - { - Assert.IsNotNull(page2.Headers.Get(HttpConstants.HttpHeaders.IndexUtilization), "Expected index utilization headers for query"); - Assert.IsNotNull(page2.IndexMetrics, "Expected index metrics response for query"); - - indexMetricsODE = page2.IndexMetrics; - } - - roundTripCount2++; - } - - // Make sure ODE and non-ODE is consistent - Assert.AreEqual(indexMetricsNonODE, indexMetricsODE); - - return new IndexMetricsParserTestOutput(indexMetricsNonODE); + return indexMetrics; } } + public sealed class IndexMetricsParserTestInput : BaselineTestInput { public IndexMetricsParserTestInput(string description, string query) diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Query/NonStreamingOrderByQueryTests.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Query/NonStreamingOrderByQueryTests.cs new file mode 100644 index 0000000000..355e6b0fac --- /dev/null +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Query/NonStreamingOrderByQueryTests.cs @@ -0,0 +1,178 @@ +namespace Microsoft.Azure.Cosmos.EmulatorTests.Query +{ + using System; + using System.Collections.Generic; + using System.Diagnostics; + using System.Linq; + using System.Threading.Tasks; + using Microsoft.Azure.Cosmos.CosmosElements; + using Microsoft.VisualStudio.TestTools.UnitTesting; + + [TestClass] + [TestCategory("Query")] + public sealed class NonStreamingOrderByQueryTests : QueryTestsBase + { + private const string Embedding = 
"[-0.008861,0.097097,0.100236,0.070044,-0.079279,0.000923,-0.012829,0.064301,-0.029405,-0.009858,-0.017753,0.063115,0.033623,0.019805,0.052704,-0.100458,0.089387,-0.040792,-0.088936,0.110212,-0.044749,0.077675,-0.017062,-0.063745,-0.009502,-0.079371,0.066952,-0.070209,0.063761,-0.038194,-0.046252,0.049983,-0.094985,-0.086341,0.024665,-0.112857,-0.038358,-0.007008,-0.010063,-0.000183,0.068841,0.024942,-0.042561,-0.044576,0.010776,0.006323,0.088285,-0.062522,0.028216,0.088291,0.033231,-0.033732,-0.002995,0.118994,0.000453,0.158588,-0.044475,-0.137629,0.066080,0.062824,-0.128369,-0.087959,0.028080,0.070063,0.046700,-0.083278,-0.118428,0.071118,0.100757,0.017944,0.026296,0.017282,-0.082127,-0.006148,0.002967,-0.032857,-0.076493,-0.072842,-0.055179,-0.081703,0.011437,-0.038698,-0.062540,-0.027899,0.087635,0.031870,0.029164,0.000524,-0.039895,-0.055559,0.024582,-0.030595,0.003942,-0.034500,0.003012,-0.023863,0.033831,0.061476,-0.090183,-0.039206,-0.026586,-0.042763,0.049835,-0.052496,-0.020044,0.073703,0.096775,0.033063,0.000313,-0.022581,-0.141154,0.032095,0.077733,-0.063739,-0.055647,-0.017604,0.044639,-0.062925,-0.001960,0.024665,-0.009416,-0.021381,0.082724,-0.031026,0.027255,0.066198,0.000845,0.008393,0.039434,0.054104,-0.060255,0.034266,0.079435,0.043624,-0.015871,-0.038030,-0.030374,-0.020542,0.007132,0.008708,0.087840,0.017351,-0.089493,0.030182,0.026961,-0.071212,-0.004854,0.007389,0.067203,-0.026351,-0.011460,-0.058723,0.013153,-0.020313,-0.051170,0.002242,0.088222,-0.004267,-0.073523,-0.021874,-0.033585,-0.048553,-0.019119,-0.025310,0.053096,0.111063,0.035042,-0.082811,-0.073749,-0.010048,0.012265,-0.023893,-0.125340,0.026611,0.043258,-0.010473,-0.044428,-0.039251,-0.046891,-0.013008,0.062219,0.078732,-0.086303,0.016901,0.010331,-0.043754,-0.057733,-0.037964,0.024907,0.068143,-0.019992,-0.035030,0.038854,0.034345,-0.048839,-0.105419,0.043013,-0.023374,-0.077629,-0.076465,0.078564,-0.024519,0.041293,-0.032088,-0.007053,0.022618,-0.004657,-0.093970,-0.000199,0.004813,-0.044789,-0.127900,-0.033516,-0.043816,0.033056,-0.057619,0.004901,0.018863,0.039752,0.000739,-0.136350,-0.067819,-0.014856,0.058351,-0.014275,-0.000873,-0.039388,-0.017191,-0.051184,-0.046863,0.006143,-0.075998,-0.064695,0.046676,-0.020558,0.082474,0.160449,-0.027475,0.009541,-0.021876,0.027416,0.078049,0.089309,0.032928,-0.033272,0.048905,0.061164,0.054811,0.024527,-0.034978,-0.018083,-0.077601,0.034112,-0.021121,0.098856,0.019585,-0.058928,-0.016126,-0.011748,0.031588,0.003205,-0.077483,-0.002372,-0.113548,0.047445,-0.027094,-0.032843,0.042378,-0.074703,0.057001,0.012020,0.131156,0.002080,-0.065770,0.112443,0.047786,0.024492,-0.108401,0.016836,0.001478,0.041542,-0.067801,0.102876,-0.052808,-0.136035,0.073852,0.079966,-0.000586,0.034055,-0.053040,0.050461,-0.021550,0.014827,0.077605,-0.024783,-0.082388,0.074410,-0.033689,-0.010982,0.043733]"; + + private static readonly IReadOnlyList Documents = new List + { + @"{""id"":""0"", ""word"":""sayVERB"", 
""embedding"":[-0.008861,0.097097,0.100236,0.070044,-0.079279,0.000923,-0.012829,0.064301,-0.029405,-0.009858,-0.017753,0.063115,0.033623,0.019805,0.052704,-0.100458,0.089387,-0.040792,-0.088936,0.110212,-0.044749,0.077675,-0.017062,-0.063745,-0.009502,-0.079371,0.066952,-0.070209,0.063761,-0.038194,-0.046252,0.049983,-0.094985,-0.086341,0.024665,-0.112857,-0.038358,-0.007008,-0.010063,-0.000183,0.068841,0.024942,-0.042561,-0.044576,0.010776,0.006323,0.088285,-0.062522,0.028216,0.088291,0.033231,-0.033732,-0.002995,0.118994,0.000453,0.158588,-0.044475,-0.137629,0.066080,0.062824,-0.128369,-0.087959,0.028080,0.070063,0.046700,-0.083278,-0.118428,0.071118,0.100757,0.017944,0.026296,0.017282,-0.082127,-0.006148,0.002967,-0.032857,-0.076493,-0.072842,-0.055179,-0.081703,0.011437,-0.038698,-0.062540,-0.027899,0.087635,0.031870,0.029164,0.000524,-0.039895,-0.055559,0.024582,-0.030595,0.003942,-0.034500,0.003012,-0.023863,0.033831,0.061476,-0.090183,-0.039206,-0.026586,-0.042763,0.049835,-0.052496,-0.020044,0.073703,0.096775,0.033063,0.000313,-0.022581,-0.141154,0.032095,0.077733,-0.063739,-0.055647,-0.017604,0.044639,-0.062925,-0.001960,0.024665,-0.009416,-0.021381,0.082724,-0.031026,0.027255,0.066198,0.000845,0.008393,0.039434,0.054104,-0.060255,0.034266,0.079435,0.043624,-0.015871,-0.038030,-0.030374,-0.020542,0.007132,0.008708,0.087840,0.017351,-0.089493,0.030182,0.026961,-0.071212,-0.004854,0.007389,0.067203,-0.026351,-0.011460,-0.058723,0.013153,-0.020313,-0.051170,0.002242,0.088222,-0.004267,-0.073523,-0.021874,-0.033585,-0.048553,-0.019119,-0.025310,0.053096,0.111063,0.035042,-0.082811,-0.073749,-0.010048,0.012265,-0.023893,-0.125340,0.026611,0.043258,-0.010473,-0.044428,-0.039251,-0.046891,-0.013008,0.062219,0.078732,-0.086303,0.016901,0.010331,-0.043754,-0.057733,-0.037964,0.024907,0.068143,-0.019992,-0.035030,0.038854,0.034345,-0.048839,-0.105419,0.043013,-0.023374,-0.077629,-0.076465,0.078564,-0.024519,0.041293,-0.032088,-0.007053,0.022618,-0.004657,-0.093970,-0.000199,0.004813,-0.044789,-0.127900,-0.033516,-0.043816,0.033056,-0.057619,0.004901,0.018863,0.039752,0.000739,-0.136350,-0.067819,-0.014856,0.058351,-0.014275,-0.000873,-0.039388,-0.017191,-0.051184,-0.046863,0.006143,-0.075998,-0.064695,0.046676,-0.020558,0.082474,0.160449,-0.027475,0.009541,-0.021876,0.027416,0.078049,0.089309,0.032928,-0.033272,0.048905,0.061164,0.054811,0.024527,-0.034978,-0.018083,-0.077601,0.034112,-0.021121,0.098856,0.019585,-0.058928,-0.016126,-0.011748,0.031588,0.003205,-0.077483,-0.002372,-0.113548,0.047445,-0.027094,-0.032843,0.042378,-0.074703,0.057001,0.012020,0.131156,0.002080,-0.065770,0.112443,0.047786,0.024492,-0.108401,0.016836,0.001478,0.041542,-0.067801,0.102876,-0.052808,-0.136035,0.073852,0.079966,-0.000586,0.034055,-0.053040,0.050461,-0.021550,0.014827,0.077605,-0.024783,-0.082388,0.074410,-0.033689,-0.010982,0.043733]}", + @"{""id"":""1"", ""word"":""go_VERB"", 
""embedding"":[0.010490,0.094733,0.143699,0.040344,-0.103710,-0.000016,-0.014351,0.019653,0.069472,-0.046938,-0.057882,0.076405,-0.025230,0.026663,0.029986,-0.001605,-0.027803,0.037521,-0.050608,0.016215,0.025947,0.061172,-0.037448,-0.079232,0.071731,-0.085143,0.021494,-0.135554,-0.026115,-0.066408,0.022858,0.083231,0.020998,-0.049906,-0.079992,-0.060827,-0.028916,-0.029005,0.026067,-0.074869,0.073802,0.023593,-0.024348,-0.093236,0.006169,0.013119,0.007817,-0.088096,-0.012373,0.099807,0.011438,0.028583,0.025614,0.175403,0.007033,0.038856,0.004040,-0.088907,0.079697,0.037448,-0.128230,-0.066502,-0.018969,0.025777,0.035905,0.003710,-0.089079,0.071521,0.039237,0.052136,0.020986,-0.030793,-0.069486,-0.137115,0.008305,0.020813,-0.155342,0.000619,-0.033499,-0.104162,-0.061528,-0.043877,-0.042524,-0.032872,0.045071,0.072908,0.096057,0.141987,-0.078056,-0.013102,-0.026589,-0.073783,0.114807,0.077389,-0.041879,-0.052886,0.053710,0.036806,-0.035973,0.049071,-0.107199,-0.043581,0.016515,-0.029278,-0.026228,0.068037,-0.024183,0.040984,-0.020469,-0.103833,-0.007225,-0.073788,-0.051063,-0.037850,0.052581,-0.053090,-0.012198,-0.057343,0.024050,-0.046498,0.003065,-0.058912,0.043695,0.006340,0.060953,-0.008608,-0.029686,0.081187,-0.020058,0.059240,-0.061306,-0.002190,-0.020671,0.076712,0.049087,0.001153,0.087481,0.008559,0.069936,-0.015886,0.006122,0.038000,-0.071984,0.005263,0.060463,-0.051217,-0.034060,0.045217,0.059163,-0.048462,-0.005371,0.009663,0.081303,0.051019,-0.001248,-0.022637,0.016228,-0.006395,-0.053985,-0.014513,-0.017219,-0.010658,-0.012446,-0.035279,-0.003882,0.036453,0.029681,0.021278,0.006188,0.027861,0.076864,-0.042835,-0.022834,0.013928,0.066150,0.040982,-0.110985,-0.018865,0.006675,0.019173,0.021484,-0.021977,-0.035462,0.000464,-0.024281,0.010881,-0.064037,-0.024893,-0.095968,0.020834,-0.114225,-0.023433,-0.043971,0.014273,0.013481,-0.007542,0.079197,0.021280,-0.129871,0.080770,0.028912,-0.044134,-0.019904,-0.039406,-0.076024,0.058488,-0.094331,-0.082633,0.017676,-0.084006,-0.024444,-0.049778,-0.044615,-0.013499,-0.036736,-0.038579,-0.117319,0.012026,-0.007846,0.024003,-0.101645,0.111720,-0.010241,0.050279,-0.002212,0.060056,-0.116837,0.006078,-0.017954,-0.021794,0.020252,-0.031337,-0.032407,0.081086,-0.095125,0.041699,0.015953,-0.045653,-0.022522,-0.021422,-0.029167,0.052594,0.016523,0.081598,-0.027877,0.000609,0.012837,0.011880,0.074220,0.009736,0.006465,-0.140252,0.010762,-0.038319,0.038924,0.042537,0.005027,0.014024,0.024548,0.050131,-0.048069,-0.012616,-0.052162,-0.100378,0.067741,-0.067824,-0.020692,-0.043022,-0.038036,-0.016860,0.027835,0.140990,-0.045201,-0.069347,0.174518,-0.000236,0.008150,-0.039823,0.041197,0.056322,0.085883,0.027376,0.036537,0.094723,-0.103076,0.105746,0.059074,0.010947,0.099756,-0.027213,0.128793,-0.054593,0.025890,0.053512,0.005200,-0.035256,0.063273,-0.027069,0.046354,-0.002262]}", + @"{""id"":""2"", ""word"":""make_VERB"", 
""embedding"":[-0.013029,0.038892,0.008581,0.056925,-0.100181,0.011566,-0.072478,0.156239,0.038442,-0.073817,-0.000439,0.114153,-0.051814,-0.056424,-0.038872,0.054174,0.000059,0.039477,-0.021345,0.053860,-0.131669,-0.020844,0.012362,-0.016145,0.048171,-0.122080,0.028292,-0.043984,-0.025178,-0.006927,-0.029133,-0.085539,-0.086455,0.001830,-0.099361,-0.029536,0.071144,-0.003143,0.027941,-0.035858,0.026530,0.004768,0.021307,-0.065139,-0.053572,0.038951,0.045786,-0.045258,-0.037586,0.038983,-0.062755,-0.000504,0.044502,0.123845,-0.050279,0.030425,-0.067798,-0.037958,0.023805,-0.011021,-0.041084,-0.090643,0.130500,0.046460,-0.040764,0.020988,-0.087054,-0.017896,0.056193,0.007352,-0.019590,-0.048728,-0.027895,-0.027241,-0.038715,0.008038,-0.172688,-0.106911,-0.012085,-0.050829,-0.053590,-0.059879,-0.030488,-0.025220,0.020381,0.102120,0.041989,0.119341,-0.006702,0.035009,0.016077,-0.014298,0.124971,0.050049,0.113425,-0.027587,-0.001379,-0.031188,0.041054,-0.013872,-0.134232,-0.073757,0.075578,-0.064260,0.035823,0.032695,-0.059019,0.086900,-0.049042,-0.105385,-0.024058,0.095202,-0.044429,-0.053781,-0.013759,-0.077265,-0.043720,-0.082217,0.128089,-0.041757,-0.023743,0.027764,0.008487,-0.022274,-0.023357,-0.013653,0.047372,0.098364,-0.020791,-0.063818,0.055996,-0.007599,0.018954,-0.003601,0.055991,-0.089158,0.008229,-0.027915,0.056351,0.101133,0.043454,0.026218,0.010540,0.053571,0.079725,-0.048278,-0.048708,-0.075923,-0.045807,0.083970,-0.087983,0.058780,0.025992,-0.008407,-0.059681,-0.022862,0.099799,0.083928,-0.024096,0.008313,-0.065932,-0.003852,0.051210,-0.104068,-0.029864,0.021315,-0.036515,-0.050546,0.003077,0.007452,-0.020468,0.035296,-0.025792,-0.045913,0.042664,-0.025302,-0.057182,-0.026525,-0.053029,-0.009697,0.031003,0.064251,-0.096399,-0.020674,0.006306,-0.004981,-0.118857,-0.058013,-0.018890,0.042343,-0.111604,-0.071149,0.042898,0.094869,-0.029797,-0.134403,-0.030753,0.050269,-0.096115,0.019021,0.014348,-0.049818,-0.017920,0.044926,0.038627,-0.091947,-0.001567,0.064930,-0.065977,-0.015673,0.034979,0.064560,0.036580,-0.000075,-0.064665,-0.054986,-0.090783,-0.033908,0.106271,0.058234,-0.100301,0.015398,-0.072886,0.019940,0.066563,0.063845,-0.036548,-0.018204,-0.008618,0.098109,-0.128401,-0.053501,-0.032671,0.027777,-0.043889,-0.018033,0.099028,-0.026501,-0.026575,-0.106259,0.036872,0.024990,0.003347,0.045086,-0.083903,0.021039,0.056445,-0.053898,0.011539,-0.033661,0.020421,-0.051413,0.021900,0.075706,0.089103,-0.022953,-0.032130,-0.049067,0.014476,-0.036070,0.010638,-0.049193,-0.005560,-0.094642,-0.045530,-0.010048,0.074026,0.053386,-0.006803,0.043264,-0.004896,0.020676,0.002030,0.019262,0.043679,-0.006854,-0.064545,-0.059780,-0.070871,0.004817,0.058769,-0.052450,-0.023481,-0.036496,-0.029701,-0.002672,-0.029965,0.053667,0.038260,-0.026692,0.068764,-0.070122,0.060288,0.124118,-0.064670,-0.044363,0.023818,-0.022746,-0.086708,0.016196]}", + @"{""id"":""3"", ""word"":""get_VERB"", 
""embedding"":[0.019242,0.144838,0.155635,0.009607,-0.169437,-0.004972,-0.021559,0.009400,0.074920,-0.033244,-0.032937,0.112560,0.041283,-0.030355,-0.048271,-0.061402,0.048208,0.083419,-0.043215,0.069025,-0.027292,0.097641,-0.070595,-0.034194,0.091538,-0.068585,0.012530,-0.120053,-0.014222,0.002379,0.070677,0.015263,0.030467,-0.001756,-0.013990,-0.026711,0.036041,0.014917,0.031644,-0.055844,0.115340,-0.003877,-0.045724,-0.025892,-0.024716,0.020095,-0.024788,0.005623,0.055026,0.078559,0.011337,0.033604,0.051766,0.135975,0.061593,0.029842,-0.021268,-0.136608,0.079957,0.011375,-0.155011,-0.151481,0.076298,0.031386,0.020274,0.028823,-0.127225,0.016972,0.000477,0.005670,-0.015052,-0.032207,-0.054631,-0.125453,0.027146,0.026122,-0.025028,-0.046540,0.021146,-0.082786,-0.051401,-0.015178,-0.017908,0.031622,0.061065,0.053762,0.093668,0.120508,0.013383,-0.019997,0.047220,-0.039273,0.070729,0.006117,0.033016,-0.059557,0.080700,0.044238,-0.013197,0.072017,-0.049897,-0.035379,-0.028077,-0.068918,0.048224,0.093904,-0.011614,0.143074,-0.066022,-0.104345,-0.080770,-0.034034,0.019643,-0.048965,0.020702,-0.061310,-0.077258,-0.091273,0.063781,-0.111229,0.058853,-0.006970,0.029294,0.001331,0.057966,-0.035211,0.033447,0.015224,0.027628,0.020672,0.023872,0.007849,-0.001215,0.045739,-0.032844,-0.027810,0.047459,0.011252,0.034142,-0.020341,0.063802,-0.005105,-0.014244,0.015935,-0.008431,-0.028795,-0.043172,-0.003883,0.023328,-0.022711,0.001897,0.032548,0.064574,0.097152,0.003275,-0.109298,0.024141,0.090362,-0.038664,-0.023928,-0.006557,0.025754,-0.011571,-0.053434,0.040903,0.061501,0.144468,0.017326,-0.032445,-0.024866,-0.000533,-0.067980,-0.099827,0.011754,0.026172,0.041204,-0.059723,0.026298,0.006623,-0.030971,0.030617,-0.008276,-0.084025,-0.030169,0.044463,0.002809,-0.030988,-0.025040,-0.059924,0.042590,-0.041501,0.018392,-0.107077,0.040232,-0.015956,-0.034321,0.064614,0.023561,-0.118772,0.011774,0.053385,-0.059752,-0.007313,-0.024684,0.012329,0.024288,-0.019210,-0.043125,0.031520,-0.072438,-0.043097,-0.061750,-0.049240,-0.039941,-0.086947,-0.019136,-0.082013,-0.095680,-0.012216,0.044958,-0.083804,-0.020841,-0.024199,0.085375,-0.000988,0.002353,-0.075649,-0.016678,-0.042220,0.002328,0.046584,-0.053008,-0.002773,0.059518,-0.113334,0.082102,0.038316,-0.023807,0.014160,-0.024084,0.049738,0.003309,0.020473,0.056583,-0.040877,-0.036386,0.033831,0.014504,0.005588,-0.098602,0.013935,-0.115838,0.048181,-0.013819,0.030253,-0.035629,0.022863,-0.019994,-0.016116,-0.052284,-0.034443,0.024592,-0.028994,-0.093012,-0.007058,0.013011,0.000991,-0.014438,-0.044545,0.040938,-0.043503,0.103244,-0.090978,0.005134,0.156962,0.013555,0.053622,-0.059002,0.032924,0.010204,-0.054882,-0.070490,0.102098,0.071841,-0.041202,0.079912,0.031834,0.048141,0.104557,-0.010763,0.057594,-0.091408,0.010093,0.010459,0.074382,0.028358,0.016023,-0.091680,0.031687,-0.108943]}", + @"{""id"":""4"", ""word"":""one_NUM"", 
""embedding"":[0.056419,-0.021141,0.090616,-0.032564,-0.054807,0.031836,-0.004311,0.064928,-0.033537,0.008632,0.052463,0.034665,0.016636,-0.006993,0.002519,-0.049758,0.003396,0.076139,-0.040574,-0.034938,-0.018776,0.016026,-0.049461,-0.058605,0.052319,-0.022392,0.037263,-0.101570,-0.015736,0.014700,0.005736,-0.098316,0.023485,0.079696,0.000523,-0.072989,0.112214,-0.042671,-0.039667,-0.048063,0.076580,-0.044722,-0.009309,-0.029152,-0.032665,-0.017058,0.066964,-0.003375,0.010473,0.053499,-0.057250,0.022080,0.031383,0.110925,-0.070559,0.090881,0.004533,-0.072508,0.064684,0.078511,-0.096904,-0.011367,0.023967,0.045979,0.047333,-0.058862,-0.043411,0.133778,-0.040768,-0.027242,-0.013070,-0.021536,0.068636,-0.093734,-0.071668,0.021352,0.002289,0.037268,-0.065373,-0.020971,-0.023998,-0.035441,0.033266,-0.028960,0.040619,0.064281,0.065757,0.123596,0.082568,-0.004874,0.028938,0.047771,0.025916,0.021118,-0.023998,0.009172,0.100679,-0.078543,-0.046099,0.037856,-0.161992,-0.039157,0.007258,-0.039263,0.063678,0.060994,0.025443,0.036222,-0.007018,-0.108600,0.022631,-0.031306,-0.050116,0.006508,-0.018496,-0.098152,-0.043395,-0.012609,0.047591,-0.006303,0.029529,0.003471,0.004601,-0.050615,0.014272,0.049927,0.105728,0.021712,-0.030996,0.026857,-0.060846,-0.107893,0.017269,0.011160,0.020126,-0.115469,-0.015772,-0.007356,0.083882,0.119524,0.075473,-0.050709,0.036562,-0.059548,-0.053619,-0.049486,-0.124977,0.004016,-0.001549,0.007669,-0.039740,0.002400,-0.069183,-0.065801,-0.088064,-0.107159,0.072443,0.083424,-0.030178,-0.033223,0.084401,0.044171,0.013811,-0.084562,0.003194,0.056247,-0.022866,0.023806,0.047635,0.025468,0.069964,-0.096787,-0.025001,-0.021526,0.061188,0.045733,-0.099350,0.016400,-0.084030,0.056672,0.099467,-0.052893,-0.100382,0.124380,0.018206,0.034541,-0.014580,-0.124287,-0.103852,0.053850,-0.017001,-0.062173,-0.063301,0.024652,-0.023819,-0.057063,-0.013738,-0.027791,-0.013030,0.030126,-0.000573,-0.026613,-0.005942,0.057634,-0.079672,0.022556,0.000011,-0.037800,0.074457,-0.071388,0.086022,0.013307,-0.035879,-0.072396,0.034418,0.004146,-0.011763,0.010722,-0.015462,0.070248,-0.097890,-0.033176,-0.078940,0.169393,0.050572,0.045639,0.004937,0.008214,-0.024237,-0.039713,-0.082674,-0.056171,-0.114638,-0.037084,-0.019666,0.023100,0.091380,-0.070856,0.019748,0.032893,0.054247,0.001259,0.166033,-0.021817,-0.076404,-0.046031,0.002690,-0.015014,0.087486,-0.147485,0.035263,-0.095335,-0.035910,-0.060311,0.068841,0.034646,-0.004976,-0.025316,-0.066141,0.042181,-0.001685,-0.061911,-0.060588,-0.036907,-0.003193,-0.042462,-0.023907,0.015987,-0.030946,0.041203,0.003092,0.075626,0.082451,-0.077289,0.102090,-0.026878,0.057229,-0.067434,-0.041582,0.036640,0.022304,0.018005,-0.005375,0.038557,-0.087250,0.042553,-0.052500,0.070422,0.079278,-0.017086,0.055328,0.025901,0.111785,0.057416,0.022670,0.048453,-0.034165,-0.034901,0.014241,0.061734]}", + @"{""id"":""5"", ""word"":""see_VERB"", 
""embedding"":[0.033784,-0.033085,0.020113,-0.010017,-0.081187,-0.021287,-0.009972,0.037831,-0.001592,0.079325,0.025121,-0.012038,-0.060439,0.023697,-0.011899,-0.009593,0.043340,-0.054792,-0.058171,0.111542,-0.072478,0.046219,-0.036414,-0.038994,0.010711,-0.022551,0.051247,-0.030018,-0.015760,-0.022421,0.029474,0.033584,0.077213,0.026976,-0.026057,-0.024171,-0.060402,-0.022088,0.036667,0.050913,0.044288,0.050425,0.020312,-0.079651,0.039995,0.025213,-0.093921,0.123022,-0.018795,-0.019389,0.002286,0.001979,-0.101728,0.107971,-0.068966,0.035741,0.015628,-0.035574,0.153279,0.094290,-0.104906,-0.052320,0.123525,0.008491,-0.095255,-0.007215,0.046057,0.073950,0.000288,0.048581,0.015934,0.018141,-0.025546,-0.197751,-0.042662,-0.052096,-0.026476,-0.062207,0.041462,-0.126658,-0.054443,0.032953,-0.042276,-0.002271,0.064819,-0.000886,0.057284,0.151726,-0.005298,0.027951,-0.030457,-0.006905,-0.063591,0.038784,0.042528,0.009519,-0.006915,0.012526,-0.026987,-0.031367,-0.032849,-0.069462,0.078398,-0.077442,0.025363,0.015721,0.006499,0.050169,0.013559,-0.041564,-0.021559,0.038734,-0.048881,-0.014219,0.018850,-0.076032,-0.031383,-0.053150,0.073602,-0.060924,0.059121,-0.135463,-0.065702,0.017648,0.056463,-0.019903,0.036354,0.063048,0.043614,-0.058998,0.036254,-0.089223,-0.017289,-0.016416,0.103582,-0.060645,-0.003059,-0.069493,0.018295,-0.066589,0.049994,0.053174,-0.001443,-0.069640,0.044240,-0.050163,-0.073309,0.004212,0.096908,0.120702,-0.067013,-0.009905,-0.043493,-0.076316,0.014215,-0.031754,-0.021288,-0.004769,-0.137067,-0.024747,-0.031678,0.015084,0.031505,-0.103574,-0.066272,0.042509,0.060396,-0.059968,0.002428,-0.024155,0.063192,-0.077420,-0.094139,0.057205,0.022803,0.069838,-0.001844,0.020190,-0.049075,-0.048242,0.025786,0.067670,-0.152904,0.038227,0.090410,-0.012939,-0.040886,-0.011291,0.012498,-0.020443,-0.048081,0.018145,0.055328,0.065196,-0.062635,-0.106917,0.064730,0.044867,-0.068668,-0.037746,0.057211,-0.034426,0.051337,-0.025572,0.046295,-0.011716,-0.006654,-0.030194,-0.051332,0.026101,0.104883,-0.007145,-0.070217,-0.000867,0.002503,-0.064973,0.037842,0.021236,0.023925,-0.017935,-0.079966,0.055120,-0.034364,0.071393,-0.029756,0.055380,-0.002203,0.004057,0.018498,0.023873,-0.042996,-0.027251,-0.031769,-0.023764,-0.056463,-0.019280,0.102642,-0.095517,0.071215,0.080328,-0.003316,0.068579,0.052600,0.053909,0.108883,0.012581,0.043106,0.031879,0.054328,-0.030787,-0.028546,-0.068817,-0.013480,0.036747,0.036505,-0.026683,0.034958,0.075650,0.001530,0.077361,-0.012978,-0.059580,-0.003257,-0.075410,0.026864,-0.090516,0.148559,-0.067887,-0.009229,0.107859,0.011224,0.026525,-0.020502,0.029137,0.059563,0.094475,0.072240,-0.095192,0.006081,0.021361,0.136805,-0.011188,-0.004593,0.007071,-0.166692,0.081480,0.042853,-0.013095,0.103005,0.036355,-0.033840,-0.017550,0.021768,0.056411,-0.041009,-0.057657,0.068774,0.006003,0.083118,-0.005328]}", + @"{""id"":""6"", ""word"":""time_NOUN"", 
""embedding"":[0.028551,-0.088598,0.050082,0.007181,-0.104172,0.101837,0.035492,-0.016076,-0.036470,-0.031597,0.008517,0.081070,0.005958,0.011227,0.055194,-0.085483,-0.028711,-0.041737,-0.087045,0.040034,-0.014075,0.115195,0.014005,-0.067294,0.107071,-0.100702,-0.009378,-0.124226,0.017455,0.003322,0.005558,-0.061389,0.162625,-0.079217,-0.014027,0.008225,0.009872,0.019838,0.037906,-0.081853,0.025483,0.006533,-0.027086,-0.061254,0.021571,-0.058520,-0.030755,-0.037765,0.013022,0.099744,-0.015871,-0.040148,0.013853,0.174780,-0.030214,0.036497,-0.030496,-0.072386,0.129520,-0.039360,-0.012611,-0.044570,0.009870,0.027195,-0.018254,-0.064432,-0.027389,0.076080,0.017096,0.094141,-0.004854,-0.119384,-0.079426,-0.096194,-0.061261,-0.046198,-0.066695,-0.095495,0.071205,-0.035128,-0.109664,0.000560,-0.008151,0.001801,0.033679,0.077297,0.069809,0.126302,-0.106770,0.054766,-0.059580,-0.031521,0.100238,0.050529,-0.093611,0.014521,0.113995,0.047877,0.043517,0.046725,-0.064180,-0.012568,-0.001566,-0.058337,0.033448,0.017877,-0.060126,0.057385,-0.074544,0.033809,-0.063151,0.051488,-0.082470,0.012745,-0.012867,-0.022761,-0.073556,-0.064041,0.072679,0.004931,0.033317,-0.036562,0.097679,0.064440,0.006349,0.029687,0.079091,0.126180,0.017304,-0.055640,-0.004360,-0.071465,0.020999,0.020292,0.020851,-0.059998,0.026812,-0.025640,0.056559,0.062663,0.068633,0.003051,0.038869,-0.033266,-0.003146,-0.004676,-0.060268,-0.099329,-0.011529,-0.024929,0.052014,0.016963,0.007527,-0.074523,-0.107231,-0.095747,0.040862,0.055063,0.037597,0.030260,-0.060898,-0.035649,0.030211,-0.061037,-0.030174,0.025636,0.006325,-0.005351,-0.055750,0.042350,0.005378,-0.067135,-0.089138,-0.024828,-0.011560,0.063750,-0.050910,-0.029080,-0.003223,0.070240,-0.015769,0.085594,-0.091409,0.066143,-0.005699,-0.074672,0.015766,-0.008242,-0.016767,0.064410,-0.070908,-0.074450,0.005181,0.062501,-0.007251,-0.036160,-0.030252,-0.006774,-0.117676,0.085261,0.055248,0.023099,-0.001916,0.010082,0.004036,0.009452,0.009258,0.015016,0.040363,0.048659,0.021832,-0.100119,-0.093323,0.046102,0.003035,-0.010301,0.019116,0.004175,0.018846,0.026686,-0.200220,0.021341,-0.039848,0.050640,-0.026792,0.057337,-0.060018,0.013275,-0.043792,-0.020855,0.018315,-0.052217,-0.129547,-0.029040,-0.041891,-0.064859,-0.007379,-0.078831,-0.039299,-0.070625,-0.035255,0.058762,0.083695,0.085014,-0.052283,-0.005346,0.002431,0.030622,0.076632,-0.101137,0.037876,-0.031314,0.000538,-0.022329,0.090587,0.060532,-0.022718,-0.007348,0.040430,0.084318,-0.037918,-0.041699,0.064522,-0.004134,-0.056841,0.021044,0.019699,-0.000656,-0.051270,-0.011215,-0.010951,-0.015126,-0.095848,-0.043651,0.068587,-0.033160,0.112103,-0.069295,0.069698,-0.027304,-0.032045,0.011533,0.078525,0.012872,-0.072700,-0.018674,-0.048350,0.033240,0.006589,0.038484,0.049222,-0.036532,0.050987,0.054671,-0.073558,-0.075419,-0.051517,-0.082573,-0.022406,0.042652]}", + @"{""id"":""7"", ""word"":""take_VERB"", 
""embedding"":[-0.085915,0.072050,0.061177,0.034385,-0.130897,-0.020019,0.041692,-0.001646,-0.000336,-0.085338,0.038205,0.018875,-0.083286,-0.061232,0.016179,0.066189,0.060670,0.000543,-0.135004,0.049699,-0.012316,-0.007948,-0.024639,-0.000349,-0.021084,-0.054148,0.061099,-0.111185,0.005860,-0.023507,0.029399,0.055637,0.049720,0.019389,0.024483,0.000545,-0.015728,-0.084071,-0.045200,-0.039850,-0.057122,-0.043768,-0.087916,-0.003074,0.094422,0.002184,0.054202,-0.081253,0.060053,0.047244,0.096669,0.020689,0.049919,0.160756,0.074185,0.068677,0.081737,0.046915,0.130613,0.003062,-0.094222,-0.038692,0.007875,0.048645,0.055408,-0.037991,-0.064129,0.057689,0.079305,0.059282,0.004156,0.006521,0.049386,-0.046141,-0.058870,-0.012268,-0.029792,-0.073071,-0.006719,-0.020783,-0.053037,-0.029306,-0.123163,-0.016259,-0.005775,-0.005656,0.082892,0.182866,-0.053775,0.081097,0.031257,-0.082803,0.072860,0.058470,-0.042368,-0.044028,-0.007983,0.027884,0.029243,-0.005043,-0.027391,-0.081547,0.087199,-0.054481,0.092473,0.073842,0.080296,0.116464,-0.060440,-0.112892,0.000785,0.031157,-0.055366,-0.042485,-0.029086,-0.058525,0.026030,-0.063302,0.052599,-0.005851,0.048085,-0.056849,-0.002640,0.003858,-0.009217,0.042292,0.014251,0.111839,0.003789,0.004826,-0.055015,0.037018,0.051596,0.034754,0.054819,-0.125969,-0.021572,-0.046395,0.050502,0.015387,0.046864,0.004564,0.036650,0.010489,0.024583,-0.036233,-0.043142,-0.063384,-0.008670,0.051395,0.047257,-0.015780,0.067077,-0.027996,-0.014117,-0.058627,0.018429,0.073644,0.050070,0.010536,0.027303,-0.002755,0.026361,-0.045678,-0.013685,0.031982,-0.006479,0.034540,-0.003912,0.057480,-0.007845,-0.097392,0.028641,-0.050393,0.085079,0.011244,-0.097678,0.007091,-0.027963,0.022363,0.002283,0.037801,-0.037196,0.169746,-0.038624,0.078767,-0.001875,0.054219,-0.052247,0.026505,-0.138177,-0.021248,-0.073399,0.058894,-0.001102,-0.008011,-0.044078,-0.001408,-0.078455,0.096208,-0.010742,-0.043702,0.079016,-0.018590,0.013306,0.022266,0.032821,0.004880,-0.002337,0.045593,-0.024818,-0.007891,0.008684,-0.061167,-0.045740,-0.051571,-0.049655,-0.008531,-0.006200,0.048838,-0.072153,0.049921,-0.059215,0.127064,0.040236,0.098747,-0.042659,-0.028465,-0.056385,0.065828,0.036884,-0.096360,0.006464,0.006201,0.056877,0.071954,0.114779,-0.087398,0.008548,0.044174,0.007840,-0.017414,0.046436,0.096034,-0.127091,-0.028683,-0.013463,-0.020997,-0.010382,-0.030867,0.037462,-0.108436,0.018027,-0.034779,0.058547,-0.068791,0.021402,-0.011831,0.002013,0.060412,-0.078119,-0.019209,0.013182,-0.094492,0.110161,-0.063736,0.046850,0.022188,0.007599,-0.007369,-0.058936,0.056017,-0.049103,-0.057295,0.058677,0.147318,0.107422,-0.079093,-0.010623,0.073036,0.024813,-0.045329,0.095850,0.046119,-0.086194,0.039533,-0.001355,0.052867,0.066449,-0.062630,0.020811,-0.032284,-0.087716,0.034099,-0.028051,-0.021583,-0.030066,-0.009957,0.022330,0.032838]}", + @"{""id"":""8"", ""word"":""know_VERB"", 
""embedding"":[-0.025984,0.067260,0.034687,0.040777,-0.161350,0.013020,-0.007056,-0.024733,0.058577,0.032559,-0.012518,0.108795,0.025453,-0.025084,-0.102243,-0.040177,-0.021312,-0.043816,-0.081668,0.091292,0.038419,0.055116,0.035742,-0.050125,-0.018741,-0.049817,0.012086,-0.079459,-0.000711,-0.023625,0.013238,0.074507,0.030027,-0.001057,0.035990,-0.050576,-0.003847,0.060832,0.011826,0.008473,-0.015987,-0.049248,-0.043038,-0.075997,0.000403,0.001103,-0.003824,-0.029661,0.098314,0.062112,-0.109037,-0.013936,-0.008971,0.112062,-0.019038,-0.048354,0.006206,-0.070438,0.065072,0.018215,-0.104006,-0.086250,0.071208,0.083047,-0.001287,-0.120081,-0.014862,0.129383,0.042387,-0.065375,0.006762,-0.073221,-0.010304,-0.115515,0.007240,0.022477,-0.058240,-0.010925,-0.000522,-0.142643,0.035225,0.062995,-0.036440,-0.023326,0.043814,0.082391,0.105983,0.029809,-0.019996,0.034061,0.027599,-0.003354,-0.002728,0.053269,-0.012316,-0.085819,0.030923,0.038884,-0.022784,0.034281,-0.092045,-0.079878,-0.006614,-0.093812,0.035342,0.094636,-0.057155,0.051942,0.024737,-0.080949,-0.065721,-0.039007,-0.005870,-0.073761,0.076176,-0.017809,0.005946,-0.080602,0.032073,-0.102764,0.038929,-0.075056,-0.013560,-0.018190,0.069482,0.083503,0.027585,0.023521,0.014033,-0.036928,-0.005933,0.030368,0.006602,0.030084,0.044758,-0.018216,0.035520,-0.012898,0.045371,-0.001641,0.110703,0.017023,0.005551,0.045749,-0.005427,-0.074813,-0.010974,-0.010148,0.095004,0.013014,-0.005125,0.016952,-0.030893,-0.062859,-0.080818,-0.051509,0.006347,0.010265,-0.063437,0.009479,0.009224,0.006066,-0.008744,-0.104529,-0.025839,0.070249,0.095893,-0.047559,0.013237,0.055287,0.009931,-0.023470,-0.098192,-0.030587,0.049573,-0.017828,-0.071414,-0.018803,-0.014656,-0.002473,0.036026,0.041413,-0.054189,0.018682,0.084050,-0.029306,-0.026105,-0.016993,-0.026153,0.102438,-0.018404,-0.094386,0.000044,0.060043,-0.082400,-0.121982,-0.031880,-0.045073,-0.080735,0.012308,0.069106,-0.024642,0.040432,0.055083,-0.017416,0.011888,-0.057825,-0.112030,-0.016259,0.037437,0.081257,-0.035797,-0.070929,-0.093251,-0.083276,-0.047928,-0.043073,-0.057629,0.043480,0.033591,-0.152529,0.020240,0.020165,0.082562,-0.033501,0.070734,-0.085496,0.029878,-0.066357,0.026343,-0.050701,-0.037989,-0.134138,0.017599,-0.078392,0.056217,-0.005646,0.014984,0.065454,-0.014985,-0.032853,0.037999,0.024869,0.056706,0.094115,0.001015,0.021370,0.050369,0.088182,-0.045817,-0.098003,-0.092237,0.017012,-0.017444,0.094234,-0.003846,-0.046259,-0.039507,0.024746,0.046465,-0.040855,0.020798,-0.003858,-0.007113,0.013065,-0.084681,0.009278,0.061606,-0.135605,-0.049061,0.016939,0.093177,0.044400,-0.003246,0.153727,0.036902,0.033454,-0.019347,-0.016304,-0.004672,0.098153,0.026940,0.091487,0.098230,-0.060728,0.079308,-0.048837,0.049123,0.109093,-0.037728,0.096776,-0.065414,0.025478,0.105321,-0.037531,-0.086120,0.071593,-0.032246,-0.014722,-0.013469]}", + @"{""id"":""9"", ""word"":""year_NOUN"", 
""embedding"":[-0.025786,-0.042872,0.017601,0.063622,-0.103329,0.023634,0.054678,0.040233,-0.051739,-0.064657,0.023516,0.033084,0.090718,0.051757,-0.013683,-0.097553,0.067920,-0.079240,-0.076960,0.099556,-0.005586,0.087658,0.021216,-0.030638,0.090439,-0.043562,0.046424,-0.070301,-0.073923,-0.085469,-0.028303,-0.072659,0.055611,-0.092044,0.070280,-0.034189,0.078083,0.056718,0.015751,-0.049231,0.043795,0.046121,-0.010422,-0.021453,-0.048846,-0.048375,0.018076,-0.139829,-0.001987,0.070634,-0.099343,-0.025047,-0.002585,0.108708,-0.068641,0.086222,0.025860,-0.030051,0.036753,-0.005112,-0.008837,-0.009897,0.013929,0.026580,-0.013089,0.020542,-0.060059,0.062302,0.095074,0.023722,0.034571,-0.059392,-0.000980,-0.000784,-0.061180,-0.069526,-0.018580,-0.118761,-0.012066,-0.083601,-0.028266,0.088769,-0.012761,0.118279,-0.012628,0.020282,0.053124,-0.029784,-0.087240,0.028490,-0.002649,-0.028264,0.074022,0.044470,-0.046292,0.062624,0.027311,0.075023,-0.005248,0.025290,0.033313,-0.107309,0.042446,-0.011116,0.009652,0.060412,0.024261,0.098144,-0.125430,-0.014803,-0.042813,-0.010587,-0.018827,-0.033675,0.056361,-0.035354,-0.044778,-0.009897,0.011386,0.035792,-0.002038,-0.031330,-0.009778,-0.001057,0.028198,-0.000704,0.023876,0.065053,0.046788,0.018013,-0.047985,-0.039999,-0.027081,0.060187,0.064933,-0.061414,0.052294,-0.003506,0.099521,-0.025471,0.085860,-0.024881,0.078323,-0.131082,0.120399,-0.000521,-0.042779,-0.023151,0.014987,-0.048307,-0.023339,0.023993,-0.050328,-0.055788,-0.087864,-0.023055,0.031015,0.043504,-0.011933,-0.047324,-0.022298,-0.056046,-0.028270,-0.041753,0.022536,0.032980,-0.093301,0.021712,0.044414,-0.027758,-0.076788,-0.021686,-0.067725,-0.004186,-0.008080,0.034993,-0.039559,-0.003645,-0.002335,-0.009184,0.008698,-0.002859,-0.058528,0.083969,0.069957,-0.043843,0.042897,-0.064762,0.021673,0.053250,-0.074874,-0.086903,-0.095449,-0.005371,0.064299,0.096931,0.043711,0.032208,-0.019445,0.214736,0.055460,-0.070967,0.015095,0.056014,-0.037825,0.039392,-0.034608,0.032038,0.103250,0.029820,0.038454,-0.064354,-0.128182,0.016288,0.015339,0.044159,-0.061186,-0.037290,-0.094278,0.066222,-0.148060,-0.024454,-0.018784,0.034114,0.040694,-0.008600,-0.029586,-0.024523,0.011347,0.001708,-0.033704,-0.051059,-0.074759,-0.042689,-0.059257,-0.045433,0.094440,-0.126478,0.037676,0.067256,0.119020,0.027962,0.120834,0.033210,-0.025278,-0.052161,0.016049,-0.021046,0.055670,-0.075887,-0.040669,-0.074454,-0.038057,0.063454,0.033169,0.035221,0.025169,0.037108,0.003785,0.011474,-0.060609,-0.065979,-0.102240,0.042050,0.028562,0.032571,-0.039410,-0.109837,-0.060135,0.050419,0.011264,-0.015715,0.044828,-0.046791,0.101858,-0.035604,-0.024885,-0.064720,0.043942]}", + }; + + private static readonly IReadOnlyList VectorDistanceTestCases = new List + { + MakeTest( + @"SELECT c.id AS Id, c.word AS Word " + + @"FROM c " + + @$"ORDER BY VectorDistance(""{Embedding}"", c.embedding, true, {{distanceFunction:'Cosine'}}) ASC", + Expectations.DocumentsAreInCosineDistanceOrder()), + MakeTest( + @"SELECT c.id AS Id, c.word AS Word " + + @"FROM c " + + @$"ORDER BY VectorDistance(""{Embedding}"", c.embedding, true, {{distanceFunction:'Cosine'}}) DESC", + Expectations.DocumentsAreInDescendingCosineDistanceOrder()), + MakeTest( + @"SELECT c.id AS Id, c.word AS Word " + + @"FROM c " + + @$"ORDER BY VectorDistance(""{Embedding}"", c.embedding, true, {{distanceFunction:'DotProduct'}}) ASC", + Expectations.DocumentsAreInDotProductDistanceOrder()), + MakeTest( + @"SELECT c.id AS Id, c.word AS Word " + + @"FROM c " + + @$"ORDER BY 
VectorDistance(""{Embedding}"", c.embedding, true, {{distanceFunction:'DotProduct'}}) DESC", + Expectations.DocumentsAreInDescendingDotProductDistanceOrder()), + MakeTest( + @"SELECT c.id AS Id, c.word AS Word " + + @"FROM c " + + @$"ORDER BY VectorDistance(""{Embedding}"", c.embedding, true, {{distanceFunction:'Euclidean'}}) ASC", + Expectations.DocumentsAreInEuclideanDistanceOrder()), + MakeTest( + @"SELECT c.id AS Id, c.word AS Word " + + @"FROM c " + + @$"ORDER BY VectorDistance(""{Embedding}"", c.embedding, true, {{distanceFunction:'Euclidean'}}) DESC", + Expectations.DocumentsAreInDescendingEuclideanDistanceOrder()) + }; + + [Ignore("Requires new build of emulator and ServiceInterop from the 0318 branch")] + [TestMethod] + public async Task VectorDistanceTestsAsync() + { + await this.CreateIngestQueryDeleteAsync( + ConnectionModes.Direct | ConnectionModes.Gateway, + CollectionTypes.SinglePartition | CollectionTypes.MultiPartition, + Documents, + RunVectorDistanceTestsAsync); + } + + private static async Task RunVectorDistanceTestsAsync(Container container, IReadOnlyList _) + { + foreach (TestCase testCase in VectorDistanceTestCases) + { + Trace.WriteLine("Executing test case: "); + Trace.WriteLine(testCase.Query); + FeedIterator iterator = container.GetItemQueryIterator(testCase.Query); + + List documents = new List(); + while (iterator.HasMoreResults) + { + FeedResponse response = await iterator.ReadNextAsync(); + Assert.IsTrue(response.StatusCode.IsSuccess()); + + documents.AddRange(response.Resource); + } + + if (!testCase.Expected.SequenceEqual(documents)) + { + Trace.WriteLine("Mismatch between expected and actual results:"); + foreach (Document document in documents) + { + Trace.WriteLine(System.Text.Json.JsonSerializer.Serialize(document)); + } + + Assert.Fail(); + } + } + } + + private static TestCase MakeTest(string query, IEnumerable expected) + { + return new TestCase(query, expected); + } + + private sealed class TestCase + { + public string Query { get; } + + public IEnumerable Expected { get; } + + public TestCase(string query, IEnumerable expected) + { + this.Query = query ?? throw new ArgumentNullException(nameof(query)); + this.Expected = expected ?? 
throw new ArgumentNullException(nameof(expected)); + } + } + + private static class Expectations + { + public static IEnumerable DocumentsAreInCosineDistanceOrder() + { + return new[] + { + new Document {Id = "0", Word = "sayVERB"}, + new Document {Id = "8", Word = "know_VERB"}, + new Document {Id = "1", Word = "go_VERB"}, + new Document {Id = "3", Word = "get_VERB"}, + new Document {Id = "7", Word = "take_VERB"}, + new Document {Id = "5", Word = "see_VERB"}, + new Document {Id = "6", Word = "time_NOUN"}, + new Document {Id = "2", Word = "make_VERB"}, + new Document {Id = "4", Word = "one_NUM"}, + new Document {Id = "9", Word = "year_NOUN"}, + }; + } + + public static IEnumerable DocumentsAreInDotProductDistanceOrder() + { + return DocumentsAreInCosineDistanceOrder(); + } + + public static IEnumerable DocumentsAreInEuclideanDistanceOrder() + { + return DocumentsAreInCosineDistanceOrder(); + } + + public static IEnumerable DocumentsAreInDescendingCosineDistanceOrder() + { + return DocumentsAreInCosineDistanceOrder() + .Reverse(); + } + + public static IEnumerable DocumentsAreInDescendingDotProductDistanceOrder() + { + return DocumentsAreInDotProductDistanceOrder() + .Reverse(); + } + + public static IEnumerable DocumentsAreInDescendingEuclideanDistanceOrder() + { + return DocumentsAreInEuclideanDistanceOrder() + .Reverse(); + } + } + + private sealed class Document + { + public string Id { get; set; } + public string Word { get; set; } + } + } +} diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Query/QueryBaselineTest.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Query/QueryBaselineTest.cs index c3cc566526..2c0f9d32b1 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Query/QueryBaselineTest.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.EmulatorTests/Query/QueryBaselineTest.cs @@ -3,21 +3,14 @@ //------------------------------------------------------------ namespace Microsoft.Azure.Cosmos.Services.Management.Tests.BaselineTest { - using Microsoft.Azure.Cosmos.EmulatorTests.Query; using Microsoft.Azure.Cosmos.SDK.EmulatorTests; - using Microsoft.Azure.Cosmos.Services.Management.Tests; using Microsoft.VisualStudio.TestTools.UnitTesting; using Newtonsoft.Json; using System; - using System.Text.RegularExpressions; using System.Threading.Tasks; using System.Xml; using System.Collections.Generic; - using System.Linq; - using Microsoft.Azure.Cosmos.CosmosElements; - using Microsoft.Azure.Cosmos.Serialization.HybridRow.Schemas; using Newtonsoft.Json.Linq; - using System.Collections.Concurrent; [Microsoft.Azure.Cosmos.SDK.EmulatorTests.TestClass] public class QueryBaselineTest : BaselineTests diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Performance.Tests/Mocks/MockRequestHelper.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Performance.Tests/Mocks/MockRequestHelper.cs index 7d3c88e84c..ae7a34e233 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Performance.Tests/Mocks/MockRequestHelper.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Performance.Tests/Mocks/MockRequestHelper.cs @@ -144,7 +144,8 @@ public static StoreResponse GetStoreResponse(DocumentServiceRequest request) CurrentReplicaSetSize = "1", CurrentWriteQuorum = "1", CurrentResourceQuotaUsage = "documentSize=0;documentsSize=1;documentsCount=1;collectionSize=1;", - GlobalCommittedLSN = "-1", + GlobalCommittedLSN = "-1", + ItemCount = "1", LSN = "2540", LocalLSN = "2540", 
LastStateChangeUtc = "Wed, 18 Aug 2021 20:30:05.117 GMT", @@ -253,7 +254,8 @@ public static StoreResponse GetStoreResponse(DocumentServiceRequest request) if (request.ResourceType == ResourceType.Document && request.OperationType == OperationType.ReadFeed) - { + { + headers.ItemCount = "1"; return new StoreResponse() { ResponseBody = new MemoryStream(MockRequestHelper.testItemFeedResponsePayload, 0, MockRequestHelper.testItemFeedResponsePayload.Length, writable: false, publiclyVisible: true), diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Performance.Tests/Program.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Performance.Tests/Program.cs index b9882b337f..9b109428b7 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Performance.Tests/Program.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Performance.Tests/Program.cs @@ -4,14 +4,9 @@ namespace Microsoft.Azure.Cosmos.Performance.Tests { - using System; using System.Collections.Generic; - using System.Linq; - using System.Security.Cryptography.X509Certificates; - using BenchmarkDotNet.Configs; using BenchmarkDotNet.Reports; using BenchmarkDotNet.Running; - using Newtonsoft.Json; class Program { diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Performance.Tests/Query/OrderByPipelineStageBenchmark.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Performance.Tests/Query/OrderByPipelineStageBenchmark.cs new file mode 100644 index 0000000000..4ec2009758 --- /dev/null +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Performance.Tests/Query/OrderByPipelineStageBenchmark.cs @@ -0,0 +1,325 @@ +namespace Microsoft.Azure.Cosmos.Performance.Tests.Query +{ + using System; + using System.Collections.Generic; + using System.Linq; + using System.Threading; + using System.Threading.Tasks; + using BenchmarkDotNet.Attributes; + using Microsoft.Azure.Cosmos; + using Microsoft.Azure.Cosmos.ChangeFeed.Pagination; + using Microsoft.Azure.Cosmos.CosmosElements; + using Microsoft.Azure.Cosmos.Pagination; + using Microsoft.Azure.Cosmos.Query.Core; + using Microsoft.Azure.Cosmos.Query.Core.Monads; + using Microsoft.Azure.Cosmos.Query.Core.Pipeline.CrossPartition.OrderBy; + using Microsoft.Azure.Cosmos.Query.Core.Pipeline; + using Microsoft.Azure.Cosmos.Query.Core.Pipeline.Pagination; + using Microsoft.Azure.Cosmos.ReadFeed.Pagination; + using Microsoft.Azure.Cosmos.Tracing; + using Microsoft.Azure.Cosmos.CosmosElements.Numbers; + + [MemoryDiagnoser] + public class OrderByPipelineStageBenchmark + { + private const int EndUserPageSize = 100; + + private const int MaxConcurrency = 10; + + private static readonly SqlQuerySpec SqlQuerySpec = new SqlQuerySpec(@" + SELECT c._rid AS _rid, [{""item"": c.index}] AS orderByItems, {""index"": c.index} AS payload + FROM c + WHERE {documentdb-formattableorderbyquery-filter} + ORDER BY c.index"); + + private static readonly IReadOnlyList OrderByColumns = new List + { + new OrderByColumn("c.index", SortOrder.Ascending) + }; + + private static readonly IDocumentContainer StreamingContainer = MockDocumentContainer.Create(streaming: true); + + private static readonly IDocumentContainer NonStreamingContainer = MockDocumentContainer.Create(streaming: false); + + [Benchmark(Baseline = true)] + public Task StreamingOrderByPipelineStage() + { + return CreateAndRunPipeline(StreamingContainer); + } + + [Benchmark] + public Task NonStreamingOrderByPipelineStage() + { + return CreateAndRunPipeline(NonStreamingContainer); + } + + private static async Task 
CreateAndRunPipeline(IDocumentContainer documentContainer) + { + IReadOnlyList ranges = await documentContainer.GetFeedRangesAsync( + trace: NoOpTrace.Singleton, + cancellationToken: default); + + TryCatch pipelineStage = OrderByCrossPartitionQueryPipelineStage.MonadicCreate( + documentContainer: documentContainer, + sqlQuerySpec: SqlQuerySpec, + targetRanges: ranges, + partitionKey: null, + orderByColumns: OrderByColumns, + queryPaginationOptions: new QueryPaginationOptions(pageSizeHint: EndUserPageSize), + maxConcurrency: MaxConcurrency, + continuationToken: null); + + IQueryPipelineStage pipeline = pipelineStage.Result; + + int documentCount = 0; + while (await pipeline.MoveNextAsync(NoOpTrace.Singleton, CancellationToken.None)) + { + TryCatch tryGetQueryPage = pipeline.Current; + QueryPage queryPage = tryGetQueryPage.Result; + + List documents = new List(queryPage.Documents.Count); + foreach (CosmosElement document in queryPage.Documents) + { + documents.Add(document.ToString()); + } + + documentCount += documents.Count; + } + } + + private sealed class MockDocumentContainer : IDocumentContainer + { + private const int LeafPageCount = 100; + + private const int PageSize = 1000; + + private const string ActivityId = "ActivityId"; + + private const int QueryCharge = 42; + + private const string CollectionRid = "1HNeAM-TiQY="; + + private const string _rid = "_rid"; + + private const string orderByItems = "orderByItems"; + + private const string payload = "payload"; + + private const string item = "item"; + + private const string Index = "index"; + + private static readonly IReadOnlyDictionary AdditionalHeaders = new Dictionary + { + ["x-ms-query-test-header"] = "This is a test", + }; + + private readonly IReadOnlyDictionary> pages; + + private MockDocumentContainer(IReadOnlyDictionary> pages) + { + this.pages = pages ?? throw new ArgumentNullException(nameof(pages)); + } + + public static IDocumentContainer Create(bool streaming) + { + IReadOnlyList feedRanges = new List + { + new FeedRangeEpk(new Documents.Routing.Range(string.Empty, "AA", true, false)), + new FeedRangeEpk(new Documents.Routing.Range("AA", "BB", true, false)), + new FeedRangeEpk(new Documents.Routing.Range("BB", "CC", true, false)), + new FeedRangeEpk(new Documents.Routing.Range("CC", "DD", true, false)), + new FeedRangeEpk(new Documents.Routing.Range("DD", "EE", true, false)), + new FeedRangeEpk(new Documents.Routing.Range("EE", "FF", true, false)), + }; + + int feedRangeIndex = 0; + Dictionary> pages = new Dictionary>(); + foreach (FeedRangeEpk feedRange in feedRanges) + { + int index = feedRangeIndex; + Dictionary leafPages = new Dictionary(); + for (int pageIndex = 0; pageIndex < LeafPageCount; ++pageIndex) + { + CosmosElement state = pageIndex == 0 ? CosmosNull.Create() : CosmosString.Create(pageIndex.ToString()); + CosmosElement continuationToken = pageIndex == LeafPageCount - 1 ? null : CosmosString.Create((pageIndex + 1).ToString()); + + List documents = new List(PageSize); + for (int documentCount = 0; documentCount < PageSize; ++documentCount) + { + documents.Add(CreateDocument(index)); + index += feedRanges.Count; + } + + QueryPage queryPage = new QueryPage( + documents: documents, + requestCharge: QueryCharge, + activityId: ActivityId, + cosmosQueryExecutionInfo: null, + distributionPlanSpec: null, + disallowContinuationTokenMessage: null, + state: continuationToken != null ? 
new QueryState(continuationToken) : null, + additionalHeaders: AdditionalHeaders, + streaming: streaming); + + leafPages.Add(state, queryPage); + } + + pages.Add(feedRange, leafPages); + ++feedRangeIndex; + } + + return new MockDocumentContainer(pages); + } + + private static CosmosElement CreateDocument(int index) + { + Documents.ResourceId resourceId = Documents.ResourceId.NewCollectionChildResourceId( + CollectionRid, + (ulong)index, + Documents.ResourceType.Document); + + CosmosElement document = CosmosObject.Create(new Dictionary + { + [_rid] = CosmosString.Create(resourceId.ToString()), + [orderByItems] = CosmosArray.Create(new List + { + CosmosObject.Create(new Dictionary + { + [item] = CosmosNumber64.Create(index) + }) + }), + [payload] = CosmosObject.Create(new Dictionary + { + [Index] = CosmosNumber64.Create(index) + }) + }); + + return document; + } + + public Task ChangeFeedAsync(FeedRangeState feedRangeState, ChangeFeedPaginationOptions changeFeedPaginationOptions, ITrace trace, CancellationToken cancellationToken) + { + throw new NotImplementedException(); + } + + public Task CreateItemAsync(CosmosObject payload, CancellationToken cancellationToken) + { + throw new NotImplementedException(); + } + + public Task> GetChildRangeAsync(FeedRangeInternal feedRange, ITrace trace, CancellationToken cancellationToken) + { + throw new NotImplementedException(); + } + + public Task> GetFeedRangesAsync(ITrace trace, CancellationToken cancellationToken) + { + return Task.FromResult( + this.pages.Keys + .Cast() + .ToList()); + } + + public Task GetResourceIdentifierAsync(ITrace trace, CancellationToken cancellationToken) + { + throw new NotImplementedException(); + } + + public Task MergeAsync(FeedRangeInternal feedRange1, FeedRangeInternal feedRange2, CancellationToken cancellationToken) + { + throw new NotImplementedException(); + } + + public Task> MonadicChangeFeedAsync(FeedRangeState feedRangeState, ChangeFeedPaginationOptions changeFeedPaginationOptions, ITrace trace, CancellationToken cancellationToken) + { + throw new NotImplementedException(); + } + + public Task> MonadicCreateItemAsync(CosmosObject payload, CancellationToken cancellationToken) + { + throw new NotImplementedException(); + } + + public Task>> MonadicGetChildRangeAsync(FeedRangeInternal feedRange, ITrace trace, CancellationToken cancellationToken) + { + throw new NotImplementedException(); + } + + public Task>> MonadicGetFeedRangesAsync(ITrace trace, CancellationToken cancellationToken) + { + return Task.FromResult( + TryCatch>.FromResult( + this.pages.Keys + .Cast() + .ToList())); + } + + public Task> MonadicGetResourceIdentifierAsync(ITrace trace, CancellationToken cancellationToken) + { + throw new NotImplementedException(); + } + + public Task MonadicMergeAsync(FeedRangeInternal feedRange1, FeedRangeInternal feedRange2, CancellationToken cancellationToken) + { + throw new NotImplementedException(); + } + + public async Task> MonadicQueryAsync(SqlQuerySpec sqlQuerySpec, FeedRangeState feedRangeState, QueryPaginationOptions queryPaginationOptions, ITrace trace, CancellationToken cancellationToken) + { + CosmosElement state = feedRangeState.State?.Value ?? 
CosmosNull.Create(); + QueryPage queryPage = this.pages[feedRangeState.FeedRange][state]; + + await Task.Delay(TimeSpan.FromMilliseconds(2)); + return TryCatch.FromResult(queryPage); + } + + public Task> MonadicReadFeedAsync(FeedRangeState feedRangeState, ReadFeedPaginationOptions readFeedPaginationOptions, ITrace trace, CancellationToken cancellationToken) + { + throw new NotImplementedException(); + } + + public Task> MonadicReadItemAsync(CosmosElement partitionKey, string identifer, CancellationToken cancellationToken) + { + throw new NotImplementedException(); + } + + public Task MonadicRefreshProviderAsync(ITrace trace, CancellationToken cancellationToken) + { + throw new NotImplementedException(); + } + + public Task MonadicSplitAsync(FeedRangeInternal feedRange, CancellationToken cancellationToken) + { + throw new NotImplementedException(); + } + + public Task QueryAsync(SqlQuerySpec sqlQuerySpec, FeedRangeState feedRangeState, QueryPaginationOptions queryPaginationOptions, ITrace trace, CancellationToken cancellationToken) + { + throw new NotImplementedException(); + } + + public Task ReadFeedAsync(FeedRangeState feedRangeState, ReadFeedPaginationOptions readFeedPaginationOptions, ITrace trace, CancellationToken cancellationToken) + { + throw new NotImplementedException(); + } + + public Task ReadItemAsync(CosmosElement partitionKey, string identifier, CancellationToken cancellationToken) + { + throw new NotImplementedException(); + } + + public Task RefreshProviderAsync(ITrace trace, CancellationToken cancellationToken) + { + throw new NotImplementedException(); + } + + public Task SplitAsync(FeedRangeInternal feedRange, CancellationToken cancellationToken) + { + throw new NotImplementedException(); + } + } + + + } +} diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/BaselineTest/TestBaseline/TraceWriterBaselineTests.ScenariosAsync.xml b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/BaselineTest/TestBaseline/TraceWriterBaselineTests.ScenariosAsync.xml index 27ddc7bc78..fb5f00ce1c 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/BaselineTest/TestBaseline/TraceWriterBaselineTests.ScenariosAsync.xml +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/BaselineTest/TestBaseline/TraceWriterBaselineTests.ScenariosAsync.xml @@ -1,1628 +1,1626 @@ - - - - ReadFeed - (ReadFeedCrossFeedRangeState.CreateFromBeginning().FeedRangeStates), - new ReadFeedPaginationOptions(pageSizeHint: 10), - cancellationToken: default); - - int numChildren = 1; // One extra since we need to read one past the last user page to get the null continuation. 
- TraceForBaselineTesting rootTrace; - using (rootTrace = TraceForBaselineTesting.GetRootTrace()) - { - while (await enumerator.MoveNextAsync(rootTrace)) - { - numChildren++; - } - } - - Assert.AreEqual(numChildren, rootTrace.Children.Count); -]]> - - - - - - - - - ChangeFeed - ( - ChangeFeedCrossFeedRangeState.CreateFromBeginning().FeedRangeStates), - new ChangeFeedPaginationOptions( - ChangeFeedMode.Incremental, - pageSizeHint: int.MaxValue), - cancellationToken: default); - - int numChildren = 0; - TraceForBaselineTesting rootTrace; - using (rootTrace = TraceForBaselineTesting.GetRootTrace()) - { - while (await enumerator.MoveNextAsync(rootTrace)) - { - numChildren++; - - if (enumerator.Current.Result.Page is ChangeFeedNotModifiedPage) - { - break; - } - } - } - - Assert.AreEqual(numChildren, rootTrace.Children.Count); -]]> - - - - - - - - - Query - - - - - - - + + + + ReadFeed + (ReadFeedCrossFeedRangeState.CreateFromBeginning().FeedRangeStates), + new ReadFeedPaginationOptions(pageSizeHint: 10)); + + int numChildren = 1; // One extra since we need to read one past the last user page to get the null continuation. + TraceForBaselineTesting rootTrace; + using (rootTrace = TraceForBaselineTesting.GetRootTrace()) + { + while (await enumerator.MoveNextAsync(rootTrace, cancellationToken: default)) + { + numChildren++; + } + } + + Assert.AreEqual(numChildren, rootTrace.Children.Count); +]]> + + + + + + + + + ChangeFeed + ( + ChangeFeedCrossFeedRangeState.CreateFromBeginning().FeedRangeStates), + new ChangeFeedPaginationOptions( + ChangeFeedMode.Incremental, + pageSizeHint: int.MaxValue)); + + int numChildren = 0; + TraceForBaselineTesting rootTrace; + using (rootTrace = TraceForBaselineTesting.GetRootTrace()) + { + while (await enumerator.MoveNextAsync(rootTrace, cancellationToken: default)) + { + numChildren++; + + if (enumerator.Current.Result.Page is ChangeFeedNotModifiedPage) + { + break; + } + } + } + + Assert.AreEqual(numChildren, rootTrace.Children.Count); +]]> + + + + + + + + + Query + + + + + + + \ No newline at end of file diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/ChangeFeed/CrossPartitionChangeFeedAsyncEnumeratorTests.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/ChangeFeed/CrossPartitionChangeFeedAsyncEnumeratorTests.cs index 8056fcf076..5ad11ec019 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/ChangeFeed/CrossPartitionChangeFeedAsyncEnumeratorTests.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/ChangeFeed/CrossPartitionChangeFeedAsyncEnumeratorTests.cs @@ -34,10 +34,9 @@ public async Task NoChangesAsync() { new FeedRangeState(FeedRangeEpk.FullRange, ChangeFeedState.Beginning()) }), - ChangeFeedPaginationOptions.Default, - cancellationToken: default); + ChangeFeedPaginationOptions.Default); - Assert.IsTrue(await enumerator.MoveNextAsync()); + Assert.IsTrue(await enumerator.MoveNextAsync(NoOpTrace.Singleton, cancellationToken: default)); Assert.IsTrue(enumerator.Current.Succeeded); Assert.IsTrue(enumerator.Current.Result.Page is ChangeFeedNotModifiedPage); Assert.IsNotNull(enumerator.Current.Result.State); @@ -54,16 +53,15 @@ public async Task SomeChangesAsync() { new FeedRangeState(FeedRangeEpk.FullRange, ChangeFeedState.Beginning()) }), - ChangeFeedPaginationOptions.Default, - cancellationToken: default); + ChangeFeedPaginationOptions.Default); // First page should be true and skip the 304 not modified - Assert.IsTrue(await enumerator.MoveNextAsync()); + Assert.IsTrue(await 
enumerator.MoveNextAsync(NoOpTrace.Singleton, cancellationToken: default)); Assert.IsTrue(enumerator.Current.Succeeded); Assert.IsTrue(enumerator.Current.Result.Page is ChangeFeedSuccessPage); // Second page should surface up the 304 - Assert.IsTrue(await enumerator.MoveNextAsync()); + Assert.IsTrue(await enumerator.MoveNextAsync(NoOpTrace.Singleton, cancellationToken: default)); Assert.IsTrue(enumerator.Current.Succeeded); Assert.IsTrue(enumerator.Current.Result.Page is ChangeFeedNotModifiedPage); } @@ -82,8 +80,7 @@ public async Task StartFromBeginningAsync(bool useContinuations) { new FeedRangeState(FeedRangeEpk.FullRange, ChangeFeedState.Beginning()) }), - ChangeFeedPaginationOptions.Default, - cancellationToken: default); + ChangeFeedPaginationOptions.Default); (int globalCount, double _) = await (useContinuations ? DrainWithUntilNotModifiedWithContinuationTokens(documentContainer, enumerator) @@ -105,8 +102,7 @@ public async Task StartFromTimeAsync(bool useContinuations) { new FeedRangeState(FeedRangeEpk.FullRange, ChangeFeedState.Time(DateTime.UtcNow)) }), - ChangeFeedPaginationOptions.Default, - cancellationToken: default); + ChangeFeedPaginationOptions.Default); for (int i = 0; i < numItems; i++) { @@ -143,8 +139,7 @@ public async Task StartFromNowAsync(bool useContinuations) { new FeedRangeState(FeedRangeEpk.FullRange, ChangeFeedState.Now()) }), - ChangeFeedPaginationOptions.Default, - cancellationToken: default); + ChangeFeedPaginationOptions.Default); (int globalCount, double _) = await (useContinuations ? DrainWithUntilNotModifiedWithContinuationTokens(documentContainer, enumerator) @@ -221,8 +216,7 @@ public async Task ShouldReturnNotModifiedAfterCyclingOnAllRanges(int partitions) CrossPartitionChangeFeedAsyncEnumerator enumerator = CrossPartitionChangeFeedAsyncEnumerator.Create( documentContainer.Object, state, - ChangeFeedPaginationOptions.Default, - cancellationToken: default); + ChangeFeedPaginationOptions.Default); (int _, double requestCharge) = await DrainUntilNotModifedAsync(enumerator); @@ -323,7 +317,13 @@ public async Task ShouldSkipNotModifiedAndReturnResults() It.IsAny(), It.IsAny())).ReturnsAsync( (FeedRangeState state, ChangeFeedPaginationOptions options, ITrace trace, CancellationToken token) - => TryCatch.FromResult(new ChangeFeedSuccessPage(content: new MemoryStream(Encoding.UTF8.GetBytes("{\"Documents\": [], \"_count\": 0, \"_rid\": \"asdf\"}")), requestCharge: 5, activityId: string.Empty, additionalHeaders: default, state.State))); + => TryCatch.FromResult(new ChangeFeedSuccessPage( + content: new MemoryStream(Encoding.UTF8.GetBytes("{\"Documents\": [], \"_count\": 0, \"_rid\": \"asdf\"}")), + requestCharge: 5, + itemCount: 0, + activityId: string.Empty, + additionalHeaders: default, + state.State))); // Returns a 304 with 1RU charge on CC-FF documentContainer.Setup(c => c.MonadicChangeFeedAsync( @@ -337,8 +337,7 @@ public async Task ShouldSkipNotModifiedAndReturnResults() CrossPartitionChangeFeedAsyncEnumerator enumerator = CrossPartitionChangeFeedAsyncEnumerator.Create( documentContainer.Object, state, - ChangeFeedPaginationOptions.Default, - cancellationToken: default); + ChangeFeedPaginationOptions.Default); (int _, double requestCharge) = await DrainUntilSuccessAsync(enumerator); @@ -399,12 +398,11 @@ public async Task ShouldReturnTryCatchOnException() CrossPartitionChangeFeedAsyncEnumerator enumerator = CrossPartitionChangeFeedAsyncEnumerator.Create( documentContainer.Object, state, - ChangeFeedPaginationOptions.Default, - cancellationToken: 
default); + ChangeFeedPaginationOptions.Default); try { - await enumerator.MoveNextAsync(NoOpTrace.Singleton); + await enumerator.MoveNextAsync(NoOpTrace.Singleton, cancellationToken: default); Assert.Fail("Should have thrown"); } catch (Exception caughtException) @@ -415,7 +413,7 @@ public async Task ShouldReturnTryCatchOnException() // Should be able to read MoveNextAsync again try { - await enumerator.MoveNextAsync(NoOpTrace.Singleton); + await enumerator.MoveNextAsync(NoOpTrace.Singleton, cancellationToken: default); Assert.Fail("Should have thrown"); } catch (Exception caughtException) @@ -428,7 +426,7 @@ public async Task ShouldReturnTryCatchOnException() { int globalCount = 0; double requestCharge = 0; - while (await enumerator.MoveNextAsync()) + while (await enumerator.MoveNextAsync(NoOpTrace.Singleton, cancellationToken: default)) { Assert.IsTrue(enumerator.Current.Succeeded); requestCharge += enumerator.Current.Result.Page.RequestCharge; @@ -447,7 +445,7 @@ public async Task ShouldReturnTryCatchOnException() { int globalCount = 0; double requestCharge = 0; - while (await enumerator.MoveNextAsync()) + while (await enumerator.MoveNextAsync(NoOpTrace.Singleton, cancellationToken: default)) { Assert.IsTrue(enumerator.Current.Succeeded); requestCharge += enumerator.Current.Result.Page.RequestCharge; @@ -469,7 +467,7 @@ public async Task ShouldReturnTryCatchOnException() List globalChanges = new List(); while (true) { - if (!await enumerator.MoveNextAsync()) + if (!await enumerator.MoveNextAsync(NoOpTrace.Singleton, cancellationToken: default)) { throw new InvalidOperationException(); } @@ -488,8 +486,7 @@ public async Task ShouldReturnTryCatchOnException() enumerator = CrossPartitionChangeFeedAsyncEnumerator.Create( documentContainer, enumerator.Current.Result.State, - ChangeFeedPaginationOptions.Default, - cancellationToken: default); + ChangeFeedPaginationOptions.Default); } return (globalChanges.Count, requestCharge); diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/ChangeFeed/NetworkAttachedDocumentContainerTests.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/ChangeFeed/NetworkAttachedDocumentContainerTests.cs index a8f5c4810c..2095773937 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/ChangeFeed/NetworkAttachedDocumentContainerTests.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/ChangeFeed/NetworkAttachedDocumentContainerTests.cs @@ -62,7 +62,8 @@ public async Task MonadicChangeFeedAsync_ChangeFeedMode_Incremental() ResponseMessage response = new ResponseMessage(System.Net.HttpStatusCode.NotModified); response.Headers.ETag = Guid.NewGuid().ToString(); response.Headers.ActivityId = Guid.NewGuid().ToString(); - response.Headers.RequestCharge = 1; + response.Headers.RequestCharge = 1; + response.Headers[HttpConstants.HttpHeaders.ItemCount] = "0"; context.SetupSequence(c => c.ProcessResourceOperationStreamAsync( It.IsAny(), @@ -121,7 +122,8 @@ public async Task MonadicChangeFeedAsync_ChangeFeedMode_FullFidelity() ResponseMessage response = new ResponseMessage(System.Net.HttpStatusCode.NotModified); response.Headers.ETag = Guid.NewGuid().ToString(); response.Headers.ActivityId = Guid.NewGuid().ToString(); - response.Headers.RequestCharge = 1; + response.Headers.RequestCharge = 1; + response.Headers[HttpConstants.HttpHeaders.ItemCount] = "0"; context.SetupSequence(c => c.ProcessResourceOperationStreamAsync( It.IsAny(), diff --git 
a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/CosmosConflictTests.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/CosmosConflictTests.cs index cbaf45fe88..b488455965 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/CosmosConflictTests.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/CosmosConflictTests.cs @@ -45,7 +45,8 @@ public async Task QueryConflicts() ResponseMessage responseMessage = new ResponseMessage(HttpStatusCode.OK) { Content = new MemoryStream(Encoding.UTF8.GetBytes(@"{ ""Conflicts"": [{ ""id"": ""Test""}]}")) - }; + }; + responseMessage.Headers.Add(HttpConstants.HttpHeaders.ItemCount, "1"); return Task.FromResult(responseMessage); }); diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/CosmosJsonSerializerUnitTests.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/CosmosJsonSerializerUnitTests.cs index 227785f791..43482842ff 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/CosmosJsonSerializerUnitTests.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/CosmosJsonSerializerUnitTests.cs @@ -424,7 +424,6 @@ private ResponseMessage CreateQueryResponse() ResponseMessage cosmosResponse = QueryResponse.CreateSuccess( cosmosElements, 1, - Encoding.UTF8.GetByteCount(serializedItem), new CosmosQueryResponseMessageHeaders( continauationToken: null, disallowContinuationTokenMessage: null, diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Pagination/BufferedPartitionRangeEnumeratorTests.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Pagination/BufferedPartitionRangeEnumeratorTests.cs index 62114fcdf9..7b8133c2ea 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Pagination/BufferedPartitionRangeEnumeratorTests.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Pagination/BufferedPartitionRangeEnumeratorTests.cs @@ -5,10 +5,8 @@ using Microsoft.Azure.Cosmos.Pagination; using Microsoft.Azure.Cosmos.Query.Core.Monads; using Microsoft.VisualStudio.TestTools.UnitTesting; - using System; using System.Threading; using Microsoft.Azure.Cosmos.ReadFeed.Pagination; - using Microsoft.Azure.Cosmos.CosmosElements; using Microsoft.Azure.Cosmos.Tracing; using Microsoft.Azure.Cosmos.Tests.Query.Pipeline; @@ -134,8 +132,7 @@ public async Task TestSplitAsync(bool aggressivePrefetch, bool exercisePrefetch) (feedRangeState) => new ReadFeedPartitionRangeEnumerator( inMemoryCollection, feedRangeState: feedRangeState, - readFeedPaginationOptions: new ReadFeedPaginationOptions(pageSizeHint: 10), - cancellationToken: default), + readFeedPaginationOptions: new ReadFeedPaginationOptions(pageSizeHint: 10)), trace: NoOpTrace.Singleton); HashSet resourceIdentifiers = await this.DrainFullyAsync(enumerable); @@ -158,18 +155,14 @@ public async Task TestBufferPageAsync(bool aggressivePrefetch, bool exercisePref feedRangeState: new FeedRangeState( new FeedRangePartitionKeyRange(partitionKeyRangeId: "0"), ReadFeedState.Beginning()), - readFeedPaginationOptions: new ReadFeedPaginationOptions(pageSizeHint: 10), - cancellationToken: default), - cancellationToken: default) : + readFeedPaginationOptions: new ReadFeedPaginationOptions(pageSizeHint: 10))) : new BufferedPartitionRangePageAsyncEnumerator( new ReadFeedPartitionRangeEnumerator( inMemoryCollection, feedRangeState: new FeedRangeState( new FeedRangePartitionKeyRange(partitionKeyRangeId: "0"), ReadFeedState.Beginning()), - readFeedPaginationOptions: new 
ReadFeedPaginationOptions(pageSizeHint: 10), - cancellationToken: default), - cancellationToken: default); + readFeedPaginationOptions: new ReadFeedPaginationOptions(pageSizeHint: 10))); int count = 0; @@ -179,7 +172,7 @@ public async Task TestBufferPageAsync(bool aggressivePrefetch, bool exercisePref await enumerator.PrefetchAsync(trace: NoOpTrace.Singleton, default); } - while (await enumerator.MoveNextAsync(NoOpTrace.Singleton)) + while (await enumerator.MoveNextAsync(NoOpTrace.Singleton, cancellationToken: default)) { count += enumerator.Current.Result.GetRecords().Count; if (exercisePrefetch) @@ -209,18 +202,14 @@ public async Task TestMoveNextAndBufferPageAsync(bool aggressivePrefetch, bool e feedRangeState: new FeedRangeState( new FeedRangePartitionKeyRange(partitionKeyRangeId: "0"), ReadFeedState.Beginning()), - readFeedPaginationOptions: new ReadFeedPaginationOptions(pageSizeHint: 10), - cancellationToken: default), - cancellationToken: default) : + readFeedPaginationOptions: new ReadFeedPaginationOptions(pageSizeHint: 10))) : new BufferedPartitionRangePageAsyncEnumerator( new ReadFeedPartitionRangeEnumerator( inMemoryCollection, feedRangeState: new FeedRangeState( new FeedRangePartitionKeyRange(partitionKeyRangeId: "0"), ReadFeedState.Beginning()), - readFeedPaginationOptions: new ReadFeedPaginationOptions(pageSizeHint: 10), - cancellationToken: default), - cancellationToken: default); + readFeedPaginationOptions: new ReadFeedPaginationOptions(pageSizeHint: 10))); if (exercisePrefetch) { @@ -228,7 +217,7 @@ public async Task TestMoveNextAndBufferPageAsync(bool aggressivePrefetch, bool e } int count = 0; - while (await enumerator.MoveNextAsync(NoOpTrace.Singleton)) + while (await enumerator.MoveNextAsync(NoOpTrace.Singleton, cancellationToken: default)) { count += enumerator.Current.Result.GetRecords().Count; @@ -259,16 +248,12 @@ PartitionRangePageAsyncEnumerator CreateBufferedEnu new ReadFeedPartitionRangeEnumerator( documentContainer, feedRangeState: feedRangeState, - readFeedPaginationOptions: new ReadFeedPaginationOptions(pageSizeHint: 10), - cancellationToken: default), - cancellationToken: default) : + readFeedPaginationOptions: new ReadFeedPaginationOptions(pageSizeHint: 10))) : new BufferedPartitionRangePageAsyncEnumerator( new ReadFeedPartitionRangeEnumerator( documentContainer, feedRangeState: feedRangeState, - readFeedPaginationOptions: new ReadFeedPaginationOptions(pageSizeHint: 10), - cancellationToken: default), - cancellationToken: default); + readFeedPaginationOptions: new ReadFeedPaginationOptions(pageSizeHint: 10))); return enumerator; }; @@ -295,18 +280,14 @@ protected async override Task>> CreateEn feedRangeState: new FeedRangeState( new FeedRangePartitionKeyRange(partitionKeyRangeId: "0"), state ?? ReadFeedState.Beginning()), - readFeedPaginationOptions: new ReadFeedPaginationOptions(pageSizeHint: 10), - cancellationToken: cancellationToken), - cancellationToken: cancellationToken) : + readFeedPaginationOptions: new ReadFeedPaginationOptions(pageSizeHint: 10))) : new BufferedPartitionRangePageAsyncEnumerator( new ReadFeedPartitionRangeEnumerator( inMemoryCollection, feedRangeState: new FeedRangeState( new FeedRangePartitionKeyRange(partitionKeyRangeId: "0"), state ?? 
ReadFeedState.Beginning()), - readFeedPaginationOptions: new ReadFeedPaginationOptions(pageSizeHint: 10), - cancellationToken: cancellationToken), - cancellationToken: cancellationToken); + readFeedPaginationOptions: new ReadFeedPaginationOptions(pageSizeHint: 10))); if (exercisePrefetch) { @@ -315,7 +296,8 @@ protected async override Task>> CreateEn return new TracingAsyncEnumerator>( enumerator: enumerator, - trace: NoOpTrace.Singleton); + trace: NoOpTrace.Singleton, + cancellationToken: default); } } } diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Pagination/CrossPartitionPartitionRangeEnumeratorTests.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Pagination/CrossPartitionPartitionRangeEnumeratorTests.cs index 20338aa768..33b2c9d42d 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Pagination/CrossPartitionPartitionRangeEnumeratorTests.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Pagination/CrossPartitionPartitionRangeEnumeratorTests.cs @@ -1,462 +1,457 @@ -//------------------------------------------------------------ -// Copyright (c) Microsoft Corporation. All rights reserved. -//------------------------------------------------------------ - -namespace Microsoft.Azure.Cosmos.Tests.Pagination -{ - using System; - using System.Collections.Generic; - using System.IO; - using System.Linq; - using System.Text; - using System.Threading; - using System.Threading.Tasks; - using Microsoft.Azure.Cosmos.Pagination; - using Microsoft.Azure.Cosmos.Query.Core.Monads; - using Microsoft.Azure.Cosmos.ReadFeed.Pagination; - using Microsoft.Azure.Cosmos.Resource.CosmosExceptions; - using Microsoft.Azure.Cosmos.Tests.Query.Pipeline; - using Microsoft.Azure.Cosmos.Tracing; - using Microsoft.VisualStudio.TestTools.UnitTesting; - using Moq; - - [TestClass] - public sealed class CrossPartitionPartitionRangeEnumeratorTests - { - [TestMethod] - public async Task Test429sAsync() - { - Implementation implementation = new Implementation(false); - await implementation.Test429sAsync(false); - } - - [TestMethod] - public async Task Test429sWithContinuationsAsync() - { - Implementation implementation = new Implementation(false); - await implementation.Test429sWithContinuationsAsync(false, false); - } - - [TestMethod] - [DataRow(false)] - [DataRow(true)] - public async Task TestEmptyPages(bool aggressivePrefetch) - { - Implementation implementation = new Implementation(false); - await implementation.TestEmptyPages(aggressivePrefetch); - } - - [TestMethod] - public async Task TestMergeToSinglePartition() - { - Implementation implementation = new Implementation(true); - await implementation.TestMergeToSinglePartition(); - } - - // Validates that on a merge (split with 1 result) we do not create new child enumerators for the merge result - [TestMethod] - public async Task OnMergeRequeueRange() - { - // We expect only creation of enumerators for the original ranges, not any child ranges - List createdEnumerators = new List(); - PartitionRangePageAsyncEnumerator createEnumerator( - FeedRangeState feedRangeState) - { - EnumeratorThatSplits enumerator = new EnumeratorThatSplits(feedRangeState, default, createdEnumerators.Count == 0); - createdEnumerators.Add(enumerator); - return enumerator; - } - - // We expect a request for children and we return the merged range - Mock feedRangeProvider = new Mock(); - feedRangeProvider.Setup(p => p.GetChildRangeAsync( - It.Is(splitRange => ((FeedRangeEpk)splitRange).Range.Min == "" && 
((FeedRangeEpk)splitRange).Range.Max == "A"), - It.IsAny(), - It.IsAny())) - .ReturnsAsync(new List() { - FeedRangeEpk.FullRange}); - - CrossPartitionRangePageAsyncEnumerator enumerator = new CrossPartitionRangePageAsyncEnumerator( - feedRangeProvider: feedRangeProvider.Object, - createPartitionRangeEnumerator: createEnumerator, - comparer: null, - maxConcurrency: 0, - prefetchPolicy: PrefetchPolicy.PrefetchSinglePage, - cancellationToken: default, - state: new CrossFeedRangeState( - new FeedRangeState[] - { - // start with 2 ranges - new FeedRangeState(new FeedRangeEpk(new Documents.Routing.Range("", "A", true, false)), ReadFeedState.Beginning()), - new FeedRangeState(new FeedRangeEpk(new Documents.Routing.Range("A", "FF", true, false)), ReadFeedState.Beginning()) - })); - - // Trigger merge, should requeue and read second enumerator - await enumerator.MoveNextAsync(NoOpTrace.Singleton); - - // Should read first enumerator again - await enumerator.MoveNextAsync(NoOpTrace.Singleton); - - Assert.AreEqual(2, createdEnumerators.Count, "Should only create the original 2 enumerators"); - Assert.AreEqual("", ((FeedRangeEpk)createdEnumerators[0].FeedRangeState.FeedRange).Range.Min); - Assert.AreEqual("A", ((FeedRangeEpk)createdEnumerators[0].FeedRangeState.FeedRange).Range.Max); - Assert.AreEqual("A", ((FeedRangeEpk)createdEnumerators[1].FeedRangeState.FeedRange).Range.Min); - Assert.AreEqual("FF", ((FeedRangeEpk)createdEnumerators[1].FeedRangeState.FeedRange).Range.Max); - - Assert.AreEqual(2, createdEnumerators[0].GetNextPageAsyncCounter, "First enumerator should have been requeued and called again"); - Assert.AreEqual(1, createdEnumerators[1].GetNextPageAsyncCounter, "Second enumerator should be used once"); - } - - // Validates that on a split we create children enumerators and use them - [TestMethod] - public async Task OnSplitQueueNewEnumerators() - { - // We expect creation of the initial full range enumerator and the 2 children - List createdEnumerators = new List(); - PartitionRangePageAsyncEnumerator createEnumerator( - FeedRangeState feedRangeState) - { - EnumeratorThatSplits enumerator = new EnumeratorThatSplits(feedRangeState, default, createdEnumerators.Count == 0); - createdEnumerators.Add(enumerator); - return enumerator; - } - - // We expect a request for children and we return the new children - Mock feedRangeProvider = new Mock(); - feedRangeProvider.Setup(p => p.GetChildRangeAsync( - It.Is(splitRange => ((FeedRangeEpk)splitRange).Range.Min == FeedRangeEpk.FullRange.Range.Min && ((FeedRangeEpk)splitRange).Range.Max == FeedRangeEpk.FullRange.Range.Max), - It.IsAny(), - It.IsAny())) - .ReturnsAsync(new List() { - new FeedRangeEpk(new Documents.Routing.Range("", "A", true, false)), - new FeedRangeEpk(new Documents.Routing.Range("A", "FF", true, false))}); - - CrossPartitionRangePageAsyncEnumerator enumerator = new CrossPartitionRangePageAsyncEnumerator( - feedRangeProvider: feedRangeProvider.Object, - createPartitionRangeEnumerator: createEnumerator, - comparer: null, - prefetchPolicy: PrefetchPolicy.PrefetchSinglePage, - maxConcurrency: 0, - cancellationToken: default, - state: new CrossFeedRangeState( - new FeedRangeState[] - { - // start with 1 range - new FeedRangeState(FeedRangeEpk.FullRange, ReadFeedState.Beginning()) - })); - - // Trigger split, should create children and call first children - await enumerator.MoveNextAsync(NoOpTrace.Singleton); - - // Should read second children - await enumerator.MoveNextAsync(NoOpTrace.Singleton); - - Assert.AreEqual(3, 
createdEnumerators.Count, "Should have the original enumerator and the children"); - Assert.AreEqual(FeedRangeEpk.FullRange.Range.Min, ((FeedRangeEpk)createdEnumerators[0].FeedRangeState.FeedRange).Range.Min); - Assert.AreEqual(FeedRangeEpk.FullRange.Range.Max, ((FeedRangeEpk)createdEnumerators[0].FeedRangeState.FeedRange).Range.Max); - Assert.AreEqual("", ((FeedRangeEpk)createdEnumerators[1].FeedRangeState.FeedRange).Range.Min); - Assert.AreEqual("A", ((FeedRangeEpk)createdEnumerators[1].FeedRangeState.FeedRange).Range.Max); - Assert.AreEqual("A", ((FeedRangeEpk)createdEnumerators[2].FeedRangeState.FeedRange).Range.Min); - Assert.AreEqual("FF", ((FeedRangeEpk)createdEnumerators[2].FeedRangeState.FeedRange).Range.Max); - - Assert.AreEqual(1, createdEnumerators[0].GetNextPageAsyncCounter, "First enumerator should have been called once"); - Assert.AreEqual(1, createdEnumerators[1].GetNextPageAsyncCounter, "Second enumerator should have been called once"); - Assert.AreEqual(1, createdEnumerators[2].GetNextPageAsyncCounter, "Second enumerator should not be used"); - } - - private class EnumeratorThatSplits : PartitionRangePageAsyncEnumerator - { - private readonly bool throwError; - - public EnumeratorThatSplits( - FeedRangeState feedRangeState, - CancellationToken cancellationToken, - bool throwError = true) - : base(feedRangeState, cancellationToken) - { - this.throwError = throwError; - } - - public override ValueTask DisposeAsync() - { - throw new NotImplementedException(); - } - - public int GetNextPageAsyncCounter { get; private set; } - - protected override Task> GetNextPageAsync(ITrace trace, CancellationToken cancellationToken) - { - this.GetNextPageAsyncCounter++; - - if (this.GetNextPageAsyncCounter == 1 - && this.throwError) - { - CosmosException splitError = new CosmosException("merge", System.Net.HttpStatusCode.Gone, (int)Documents.SubStatusCodes.PartitionKeyRangeGone, string.Empty, 0); - TryCatch state = TryCatch.FromException(splitError); - return Task.FromResult(state); - } - else - { - return Task.FromResult(TryCatch.FromResult( - new ReadFeedPage( - new MemoryStream(Encoding.UTF8.GetBytes("{\"Documents\": [], \"_count\": 0, \"_rid\": \"asdf\"}")), - requestCharge: 1, - activityId: Guid.NewGuid().ToString(), - additionalHeaders: null, - state: ReadFeedState.Beginning()))); - } - } - } - - [TestMethod] - [DataRow(false, false, false, DisplayName = "Use State: false, Allow Splits: false, Allow Merges: false")] - [DataRow(false, false, true, DisplayName = "Use State: false, Allow Splits: false, Allow Merges: true")] - [DataRow(false, true, false, DisplayName = "Use State: false, Allow Splits: true, Allow Merges: false")] - [DataRow(false, true, true, DisplayName = "Use State: false, Allow Splits: true, Allow Merges: true")] - [DataRow(true, false, false, DisplayName = "Use State: true, Allow Splits: false, Allow Merges: false")] - [DataRow(true, false, true, DisplayName = "Use State: true, Allow Splits: false, Allow Merges: true")] - [DataRow(true, true, false, DisplayName = "Use State: true, Allow Splits: true, Allow Merges: false")] - [DataRow(true, true, true, DisplayName = "Use State: true, Allow Splits: true, Allow Merges: true")] - public async Task TestSplitAndMergeAsync(bool useState, bool allowSplits, bool allowMerges) - { - Implementation implementation = new Implementation(singlePartition: false); - await implementation.TestSplitAndMergeImplementationAsync(useState, allowSplits, allowMerges); - } - - private sealed class Implementation : PartitionRangeEnumeratorTests, 
CrossFeedRangeState> - { - enum TriState { NotReady, Ready, Done }; - - public Implementation(bool singlePartition) - : base(singlePartition) - { - this.ShouldMerge = TriState.NotReady; - } - - private TriState ShouldMerge { get; set; } - - private IDocumentContainer DocumentContainer { get; set; } - - private async Task ShouldReturnFailure() - { - if (this.ShouldMerge == TriState.Ready) - { - await this.DocumentContainer.RefreshProviderAsync(NoOpTrace.Singleton, cancellationToken: default); - List ranges = await this.DocumentContainer.GetFeedRangesAsync( - trace: NoOpTrace.Singleton, - cancellationToken: default); - - await this.DocumentContainer.MergeAsync(ranges[0], ranges[1], default); - await this.DocumentContainer.RefreshProviderAsync(NoOpTrace.Singleton, default); - this.ShouldMerge = TriState.Done; - - return new CosmosException( - message: "PKRange was split/merged", - statusCode: System.Net.HttpStatusCode.Gone, - subStatusCode: (int)Documents.SubStatusCodes.PartitionKeyRangeGone, - activityId: "BC0CCDA5-D378-4922-B8B0-D51D745B9139", - requestCharge: 0.0); - } - else - { - return null; - } - } - - public async Task TestMergeToSinglePartition() - { - int numItems = 1000; - FlakyDocumentContainer.FailureConfigs config = new FlakyDocumentContainer.FailureConfigs( - inject429s: false, - injectEmptyPages: false, - shouldReturnFailure: this.ShouldReturnFailure); - - this.DocumentContainer = await this.CreateDocumentContainerAsync(numItems: numItems, failureConfigs: config); - - await this.DocumentContainer.RefreshProviderAsync(NoOpTrace.Singleton, cancellationToken: default); - List ranges = await this.DocumentContainer.GetFeedRangesAsync( - trace: NoOpTrace.Singleton, - cancellationToken: default); - await this.DocumentContainer.SplitAsync(ranges.First(), cancellationToken: default); - - IAsyncEnumerator>> enumerator = await this.CreateEnumeratorAsync(this.DocumentContainer); - List identifiers = new List(); - int iteration = 0; - while (await enumerator.MoveNextAsync()) - { - TryCatch> tryGetPage = enumerator.Current; - tryGetPage.ThrowIfFailed(); - - IReadOnlyList records = this.GetRecordsFromPage(tryGetPage.Result); - foreach (Record record in records) - { - identifiers.Add(record.Payload["pk"].ToString()); - } - - ++iteration; - if (iteration == 1) - { - this.ShouldMerge = TriState.Ready; - } - } - - Assert.AreEqual(numItems, identifiers.Count); - } - - public async Task TestSplitAndMergeImplementationAsync( - bool useState, - bool allowSplits, - bool allowMerges) - { - int numItems = 1000; - IDocumentContainer inMemoryCollection = await this.CreateDocumentContainerAsync(numItems); - IAsyncEnumerator>> enumerator = await this.CreateEnumeratorAsync(inMemoryCollection); - HashSet identifiers = new HashSet(); - Random random = new Random(); - while (await enumerator.MoveNextAsync()) - { - TryCatch> tryGetPage = enumerator.Current; - tryGetPage.ThrowIfFailed(); - - IReadOnlyList records = this.GetRecordsFromPage(tryGetPage.Result); - foreach (Record record in records) - { - identifiers.Add(record.Payload["pk"].ToString()); - } - - if (useState) - { - if (tryGetPage.Result.State == null) - { - break; - } - - enumerator = await this.CreateEnumeratorAsync( - inMemoryCollection, - false, - false, - tryGetPage.Result.State); - } - - if (random.Next() % 2 == 0) - { - if (allowSplits && (random.Next() % 2 == 0)) - { - // Split - await inMemoryCollection.RefreshProviderAsync(NoOpTrace.Singleton, cancellationToken: default); - List ranges = await inMemoryCollection.GetFeedRangesAsync( - trace: 
NoOpTrace.Singleton, - cancellationToken: default); - FeedRangeInternal randomRangeToSplit = ranges[random.Next(0, ranges.Count)]; - await inMemoryCollection.SplitAsync(randomRangeToSplit, cancellationToken: default); - } - - if (allowMerges && (random.Next() % 2 == 0)) - { - // Merge - await inMemoryCollection.RefreshProviderAsync(NoOpTrace.Singleton, cancellationToken: default); - List ranges = await inMemoryCollection.GetFeedRangesAsync( - trace: NoOpTrace.Singleton, - cancellationToken: default); - if (ranges.Count > 1) - { - ranges = ranges.OrderBy(range => range.Range.Min).ToList(); - int indexToMerge = random.Next(0, ranges.Count); - int adjacentIndex = indexToMerge == (ranges.Count - 1) ? indexToMerge - 1 : indexToMerge + 1; - await inMemoryCollection.MergeAsync(ranges[indexToMerge], ranges[adjacentIndex], cancellationToken: default); - } - } - } - } - - Assert.AreEqual(numItems, identifiers.Count); - } - - protected override IAsyncEnumerable>> CreateEnumerable( - IDocumentContainer inMemoryCollection, - bool aggressivePrefetch = false, - CrossFeedRangeState state = null) - { - PartitionRangePageAsyncEnumerator createEnumerator( - FeedRangeState feedRangeState) => new ReadFeedPartitionRangeEnumerator( - inMemoryCollection, - feedRangeState: feedRangeState, - readFeedPaginationOptions: new ReadFeedPaginationOptions(pageSizeHint: 10), - cancellationToken: default); - - return new CrossPartitionRangePageAsyncEnumerable( - feedRangeProvider: inMemoryCollection, - createPartitionRangeEnumerator: createEnumerator, - comparer: PartitionRangePageAsyncEnumeratorComparer.Singleton, - maxConcurrency: 10, - prefetchPolicy: aggressivePrefetch ? PrefetchPolicy.PrefetchAll : PrefetchPolicy.PrefetchSinglePage, - trace: NoOpTrace.Singleton, - state: state ?? new CrossFeedRangeState( - new FeedRangeState[] - { - new FeedRangeState(FeedRangeEpk.FullRange, ReadFeedState.Beginning()) - })); - } - - protected override Task>>> CreateEnumeratorAsync( - IDocumentContainer inMemoryCollection, - bool aggressivePrefetch = false, - bool exercisePrefetch = false, - CrossFeedRangeState state = null, - CancellationToken cancellationToken = default) - { - PartitionRangePageAsyncEnumerator createEnumerator( - FeedRangeState feedRangeState) - { - return new ReadFeedPartitionRangeEnumerator( - inMemoryCollection, - feedRangeState: feedRangeState, - readFeedPaginationOptions: new ReadFeedPaginationOptions(pageSizeHint: 10), - cancellationToken: default); - } - - IAsyncEnumerator>> enumerator = new TracingAsyncEnumerator>>( - new CrossPartitionRangePageAsyncEnumerator( - feedRangeProvider: inMemoryCollection, - createPartitionRangeEnumerator: createEnumerator, - comparer: PartitionRangePageAsyncEnumeratorComparer.Singleton, - maxConcurrency: 10, - prefetchPolicy: PrefetchPolicy.PrefetchSinglePage, - cancellationToken: cancellationToken, - state: state ?? 
new CrossFeedRangeState( - new FeedRangeState[] - { - new FeedRangeState(FeedRangeEpk.FullRange, ReadFeedState.Beginning()) - })), - NoOpTrace.Singleton); - - return Task.FromResult(enumerator); - } - - public override IReadOnlyList GetRecordsFromPage(CrossFeedRangePage page) - { - return page.Page.GetRecords(); - } - - private sealed class PartitionRangePageAsyncEnumeratorComparer : IComparer> - { - public static readonly PartitionRangePageAsyncEnumeratorComparer Singleton = new PartitionRangePageAsyncEnumeratorComparer(); - - public int Compare( - PartitionRangePageAsyncEnumerator partitionRangePageEnumerator1, - PartitionRangePageAsyncEnumerator partitionRangePageEnumerator2) - { - if (object.ReferenceEquals(partitionRangePageEnumerator1, partitionRangePageEnumerator2)) - { - return 0; - } - - // Either both don't have results or both do. - return string.CompareOrdinal( - ((FeedRangeEpk)partitionRangePageEnumerator1.FeedRangeState.FeedRange).Range.Min, - ((FeedRangeEpk)partitionRangePageEnumerator2.FeedRangeState.FeedRange).Range.Min); - } - } - } - } -} +//------------------------------------------------------------ +// Copyright (c) Microsoft Corporation. All rights reserved. +//------------------------------------------------------------ + +namespace Microsoft.Azure.Cosmos.Tests.Pagination +{ + using System; + using System.Collections.Generic; + using System.IO; + using System.Linq; + using System.Text; + using System.Threading; + using System.Threading.Tasks; + using Microsoft.Azure.Cosmos.Pagination; + using Microsoft.Azure.Cosmos.Query.Core.Monads; + using Microsoft.Azure.Cosmos.ReadFeed.Pagination; + using Microsoft.Azure.Cosmos.Tests.Query.Pipeline; + using Microsoft.Azure.Cosmos.Tracing; + using Microsoft.VisualStudio.TestTools.UnitTesting; + using Moq; + + [TestClass] + public sealed class CrossPartitionPartitionRangeEnumeratorTests + { + [TestMethod] + public async Task Test429sAsync() + { + Implementation implementation = new Implementation(false); + await implementation.Test429sAsync(false); + } + + [TestMethod] + public async Task Test429sWithContinuationsAsync() + { + Implementation implementation = new Implementation(false); + await implementation.Test429sWithContinuationsAsync(false, false); + } + + [TestMethod] + [DataRow(false)] + [DataRow(true)] + public async Task TestEmptyPages(bool aggressivePrefetch) + { + Implementation implementation = new Implementation(false); + await implementation.TestEmptyPages(aggressivePrefetch); + } + + [TestMethod] + public async Task TestMergeToSinglePartition() + { + Implementation implementation = new Implementation(true); + await implementation.TestMergeToSinglePartition(); + } + + // Validates that on a merge (split with 1 result) we do not create new child enumerators for the merge result + [TestMethod] + public async Task OnMergeRequeueRange() + { + // We expect only creation of enumerators for the original ranges, not any child ranges + List createdEnumerators = new List(); + PartitionRangePageAsyncEnumerator createEnumerator( + FeedRangeState feedRangeState) + { + EnumeratorThatSplits enumerator = new EnumeratorThatSplits(feedRangeState, createdEnumerators.Count == 0); + createdEnumerators.Add(enumerator); + return enumerator; + } + + // We expect a request for children and we return the merged range + Mock feedRangeProvider = new Mock(); + feedRangeProvider.Setup(p => p.GetChildRangeAsync( + It.Is(splitRange => ((FeedRangeEpk)splitRange).Range.Min == "" && ((FeedRangeEpk)splitRange).Range.Max == "A"), + It.IsAny(), + 
It.IsAny())) + .ReturnsAsync(new List() { + FeedRangeEpk.FullRange}); + + CrossPartitionRangePageAsyncEnumerator enumerator = new CrossPartitionRangePageAsyncEnumerator( + feedRangeProvider: feedRangeProvider.Object, + createPartitionRangeEnumerator: createEnumerator, + comparer: null, + maxConcurrency: 0, + prefetchPolicy: PrefetchPolicy.PrefetchSinglePage, + state: new CrossFeedRangeState( + new FeedRangeState[] + { + // start with 2 ranges + new FeedRangeState(new FeedRangeEpk(new Documents.Routing.Range("", "A", true, false)), ReadFeedState.Beginning()), + new FeedRangeState(new FeedRangeEpk(new Documents.Routing.Range("A", "FF", true, false)), ReadFeedState.Beginning()) + })); + + // Trigger merge, should requeue and read second enumerator + await enumerator.MoveNextAsync(NoOpTrace.Singleton, cancellationToken: default); + + // Should read first enumerator again + await enumerator.MoveNextAsync(NoOpTrace.Singleton, cancellationToken: default); + + Assert.AreEqual(2, createdEnumerators.Count, "Should only create the original 2 enumerators"); + Assert.AreEqual("", ((FeedRangeEpk)createdEnumerators[0].FeedRangeState.FeedRange).Range.Min); + Assert.AreEqual("A", ((FeedRangeEpk)createdEnumerators[0].FeedRangeState.FeedRange).Range.Max); + Assert.AreEqual("A", ((FeedRangeEpk)createdEnumerators[1].FeedRangeState.FeedRange).Range.Min); + Assert.AreEqual("FF", ((FeedRangeEpk)createdEnumerators[1].FeedRangeState.FeedRange).Range.Max); + + Assert.AreEqual(2, createdEnumerators[0].GetNextPageAsyncCounter, "First enumerator should have been requeued and called again"); + Assert.AreEqual(1, createdEnumerators[1].GetNextPageAsyncCounter, "Second enumerator should be used once"); + } + + // Validates that on a split we create children enumerators and use them + [TestMethod] + public async Task OnSplitQueueNewEnumerators() + { + // We expect creation of the initial full range enumerator and the 2 children + List createdEnumerators = new List(); + PartitionRangePageAsyncEnumerator createEnumerator( + FeedRangeState feedRangeState) + { + EnumeratorThatSplits enumerator = new EnumeratorThatSplits(feedRangeState, createdEnumerators.Count == 0); + createdEnumerators.Add(enumerator); + return enumerator; + } + + // We expect a request for children and we return the new children + Mock feedRangeProvider = new Mock(); + feedRangeProvider.Setup(p => p.GetChildRangeAsync( + It.Is(splitRange => ((FeedRangeEpk)splitRange).Range.Min == FeedRangeEpk.FullRange.Range.Min && ((FeedRangeEpk)splitRange).Range.Max == FeedRangeEpk.FullRange.Range.Max), + It.IsAny(), + It.IsAny())) + .ReturnsAsync(new List() { + new FeedRangeEpk(new Documents.Routing.Range("", "A", true, false)), + new FeedRangeEpk(new Documents.Routing.Range("A", "FF", true, false))}); + + CrossPartitionRangePageAsyncEnumerator enumerator = new CrossPartitionRangePageAsyncEnumerator( + feedRangeProvider: feedRangeProvider.Object, + createPartitionRangeEnumerator: createEnumerator, + comparer: null, + prefetchPolicy: PrefetchPolicy.PrefetchSinglePage, + maxConcurrency: 0, + state: new CrossFeedRangeState( + new FeedRangeState[] + { + // start with 1 range + new FeedRangeState(FeedRangeEpk.FullRange, ReadFeedState.Beginning()) + })); + + // Trigger split, should create children and call first children + await enumerator.MoveNextAsync(NoOpTrace.Singleton, cancellationToken: default); + + // Should read second children + await enumerator.MoveNextAsync(NoOpTrace.Singleton, cancellationToken: default); + + Assert.AreEqual(3, createdEnumerators.Count, "Should 
have the original enumerator and the children"); + Assert.AreEqual(FeedRangeEpk.FullRange.Range.Min, ((FeedRangeEpk)createdEnumerators[0].FeedRangeState.FeedRange).Range.Min); + Assert.AreEqual(FeedRangeEpk.FullRange.Range.Max, ((FeedRangeEpk)createdEnumerators[0].FeedRangeState.FeedRange).Range.Max); + Assert.AreEqual("", ((FeedRangeEpk)createdEnumerators[1].FeedRangeState.FeedRange).Range.Min); + Assert.AreEqual("A", ((FeedRangeEpk)createdEnumerators[1].FeedRangeState.FeedRange).Range.Max); + Assert.AreEqual("A", ((FeedRangeEpk)createdEnumerators[2].FeedRangeState.FeedRange).Range.Min); + Assert.AreEqual("FF", ((FeedRangeEpk)createdEnumerators[2].FeedRangeState.FeedRange).Range.Max); + + Assert.AreEqual(1, createdEnumerators[0].GetNextPageAsyncCounter, "First enumerator should have been called once"); + Assert.AreEqual(1, createdEnumerators[1].GetNextPageAsyncCounter, "Second enumerator should have been called once"); + Assert.AreEqual(1, createdEnumerators[2].GetNextPageAsyncCounter, "Second enumerator should not be used"); + } + + private class EnumeratorThatSplits : PartitionRangePageAsyncEnumerator + { + private readonly bool throwError; + + public EnumeratorThatSplits( + FeedRangeState feedRangeState, + bool throwError = true) + : base(feedRangeState) + { + this.throwError = throwError; + } + + public override ValueTask DisposeAsync() + { + throw new NotImplementedException(); + } + + public int GetNextPageAsyncCounter { get; private set; } + + protected override Task> GetNextPageAsync(ITrace trace, CancellationToken cancellationToken) + { + this.GetNextPageAsyncCounter++; + + if (this.GetNextPageAsyncCounter == 1 + && this.throwError) + { + CosmosException splitError = new CosmosException("merge", System.Net.HttpStatusCode.Gone, (int)Documents.SubStatusCodes.PartitionKeyRangeGone, string.Empty, 0); + TryCatch state = TryCatch.FromException(splitError); + return Task.FromResult(state); + } + else + { + return Task.FromResult(TryCatch.FromResult( + new ReadFeedPage( + new MemoryStream(Encoding.UTF8.GetBytes("{\"Documents\": [], \"_count\": 0, \"_rid\": \"asdf\"}")), + requestCharge: 1, + itemCount: 0, + activityId: Guid.NewGuid().ToString(), + additionalHeaders: null, + state: ReadFeedState.Beginning()))); + } + } + } + + [TestMethod] + [DataRow(false, false, false, DisplayName = "Use State: false, Allow Splits: false, Allow Merges: false")] + [DataRow(false, false, true, DisplayName = "Use State: false, Allow Splits: false, Allow Merges: true")] + [DataRow(false, true, false, DisplayName = "Use State: false, Allow Splits: true, Allow Merges: false")] + [DataRow(false, true, true, DisplayName = "Use State: false, Allow Splits: true, Allow Merges: true")] + [DataRow(true, false, false, DisplayName = "Use State: true, Allow Splits: false, Allow Merges: false")] + [DataRow(true, false, true, DisplayName = "Use State: true, Allow Splits: false, Allow Merges: true")] + [DataRow(true, true, false, DisplayName = "Use State: true, Allow Splits: true, Allow Merges: false")] + [DataRow(true, true, true, DisplayName = "Use State: true, Allow Splits: true, Allow Merges: true")] + public async Task TestSplitAndMergeAsync(bool useState, bool allowSplits, bool allowMerges) + { + Implementation implementation = new Implementation(singlePartition: false); + await implementation.TestSplitAndMergeImplementationAsync(useState, allowSplits, allowMerges); + } + + private sealed class Implementation : PartitionRangeEnumeratorTests, CrossFeedRangeState> + { + enum TriState { NotReady, Ready, Done }; + + 
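            // Annotation: ShouldMerge drives the fault injection used by TestMergeToSinglePartition.
            // It starts NotReady, flips to Ready once the first page has been drained, and on the next
            // container call ShouldReturnFailure merges ranges[0] and ranges[1], marks the state Done,
            // and surfaces a 410 Gone with SubStatusCodes.PartitionKeyRangeGone. The cross-partition
            // enumerator is expected to absorb that failure and requeue the merged range, so the drain
            // loop still observes every document (the test asserts all numItems identifiers are seen).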
public Implementation(bool singlePartition) + : base(singlePartition) + { + this.ShouldMerge = TriState.NotReady; + } + + private TriState ShouldMerge { get; set; } + + private IDocumentContainer DocumentContainer { get; set; } + + private async Task ShouldReturnFailure() + { + if (this.ShouldMerge == TriState.Ready) + { + await this.DocumentContainer.RefreshProviderAsync(NoOpTrace.Singleton, cancellationToken: default); + List ranges = await this.DocumentContainer.GetFeedRangesAsync( + trace: NoOpTrace.Singleton, + cancellationToken: default); + + await this.DocumentContainer.MergeAsync(ranges[0], ranges[1], default); + await this.DocumentContainer.RefreshProviderAsync(NoOpTrace.Singleton, default); + this.ShouldMerge = TriState.Done; + + return new CosmosException( + message: "PKRange was split/merged", + statusCode: System.Net.HttpStatusCode.Gone, + subStatusCode: (int)Documents.SubStatusCodes.PartitionKeyRangeGone, + activityId: "BC0CCDA5-D378-4922-B8B0-D51D745B9139", + requestCharge: 0.0); + } + else + { + return null; + } + } + + public async Task TestMergeToSinglePartition() + { + int numItems = 1000; + FlakyDocumentContainer.FailureConfigs config = new FlakyDocumentContainer.FailureConfigs( + inject429s: false, + injectEmptyPages: false, + shouldReturnFailure: this.ShouldReturnFailure); + + this.DocumentContainer = await this.CreateDocumentContainerAsync(numItems: numItems, failureConfigs: config); + + await this.DocumentContainer.RefreshProviderAsync(NoOpTrace.Singleton, cancellationToken: default); + List ranges = await this.DocumentContainer.GetFeedRangesAsync( + trace: NoOpTrace.Singleton, + cancellationToken: default); + await this.DocumentContainer.SplitAsync(ranges.First(), cancellationToken: default); + + IAsyncEnumerator>> enumerator = await this.CreateEnumeratorAsync(this.DocumentContainer); + List identifiers = new List(); + int iteration = 0; + while (await enumerator.MoveNextAsync()) + { + TryCatch> tryGetPage = enumerator.Current; + tryGetPage.ThrowIfFailed(); + + IReadOnlyList records = this.GetRecordsFromPage(tryGetPage.Result); + foreach (Record record in records) + { + identifiers.Add(record.Payload["pk"].ToString()); + } + + ++iteration; + if (iteration == 1) + { + this.ShouldMerge = TriState.Ready; + } + } + + Assert.AreEqual(numItems, identifiers.Count); + } + + public async Task TestSplitAndMergeImplementationAsync( + bool useState, + bool allowSplits, + bool allowMerges) + { + int numItems = 1000; + IDocumentContainer inMemoryCollection = await this.CreateDocumentContainerAsync(numItems); + IAsyncEnumerator>> enumerator = await this.CreateEnumeratorAsync(inMemoryCollection); + HashSet identifiers = new HashSet(); + Random random = new Random(); + while (await enumerator.MoveNextAsync()) + { + TryCatch> tryGetPage = enumerator.Current; + tryGetPage.ThrowIfFailed(); + + IReadOnlyList records = this.GetRecordsFromPage(tryGetPage.Result); + foreach (Record record in records) + { + identifiers.Add(record.Payload["pk"].ToString()); + } + + if (useState) + { + if (tryGetPage.Result.State == null) + { + break; + } + + enumerator = await this.CreateEnumeratorAsync( + inMemoryCollection, + false, + false, + tryGetPage.Result.State); + } + + if (random.Next() % 2 == 0) + { + if (allowSplits && (random.Next() % 2 == 0)) + { + // Split + await inMemoryCollection.RefreshProviderAsync(NoOpTrace.Singleton, cancellationToken: default); + List ranges = await inMemoryCollection.GetFeedRangesAsync( + trace: NoOpTrace.Singleton, + cancellationToken: default); + FeedRangeInternal 
randomRangeToSplit = ranges[random.Next(0, ranges.Count)]; + await inMemoryCollection.SplitAsync(randomRangeToSplit, cancellationToken: default); + } + + if (allowMerges && (random.Next() % 2 == 0)) + { + // Merge + await inMemoryCollection.RefreshProviderAsync(NoOpTrace.Singleton, cancellationToken: default); + List ranges = await inMemoryCollection.GetFeedRangesAsync( + trace: NoOpTrace.Singleton, + cancellationToken: default); + if (ranges.Count > 1) + { + ranges = ranges.OrderBy(range => range.Range.Min).ToList(); + int indexToMerge = random.Next(0, ranges.Count); + int adjacentIndex = indexToMerge == (ranges.Count - 1) ? indexToMerge - 1 : indexToMerge + 1; + await inMemoryCollection.MergeAsync(ranges[indexToMerge], ranges[adjacentIndex], cancellationToken: default); + } + } + } + } + + Assert.AreEqual(numItems, identifiers.Count); + } + + protected override IAsyncEnumerable>> CreateEnumerable( + IDocumentContainer inMemoryCollection, + bool aggressivePrefetch = false, + CrossFeedRangeState state = null) + { + PartitionRangePageAsyncEnumerator createEnumerator( + FeedRangeState feedRangeState) => new ReadFeedPartitionRangeEnumerator( + inMemoryCollection, + feedRangeState: feedRangeState, + readFeedPaginationOptions: new ReadFeedPaginationOptions(pageSizeHint: 10)); + + return new CrossPartitionRangePageAsyncEnumerable( + feedRangeProvider: inMemoryCollection, + createPartitionRangeEnumerator: createEnumerator, + comparer: PartitionRangePageAsyncEnumeratorComparer.Singleton, + maxConcurrency: 10, + prefetchPolicy: aggressivePrefetch ? PrefetchPolicy.PrefetchAll : PrefetchPolicy.PrefetchSinglePage, + trace: NoOpTrace.Singleton, + state: state ?? new CrossFeedRangeState( + new FeedRangeState[] + { + new FeedRangeState(FeedRangeEpk.FullRange, ReadFeedState.Beginning()) + })); + } + + protected override Task>>> CreateEnumeratorAsync( + IDocumentContainer inMemoryCollection, + bool aggressivePrefetch = false, + bool exercisePrefetch = false, + CrossFeedRangeState state = null, + CancellationToken cancellationToken = default) + { + PartitionRangePageAsyncEnumerator createEnumerator( + FeedRangeState feedRangeState) + { + return new ReadFeedPartitionRangeEnumerator( + inMemoryCollection, + feedRangeState: feedRangeState, + readFeedPaginationOptions: new ReadFeedPaginationOptions(pageSizeHint: 10)); + } + + IAsyncEnumerator>> enumerator = new TracingAsyncEnumerator>>( + new CrossPartitionRangePageAsyncEnumerator( + feedRangeProvider: inMemoryCollection, + createPartitionRangeEnumerator: createEnumerator, + comparer: PartitionRangePageAsyncEnumeratorComparer.Singleton, + maxConcurrency: 10, + prefetchPolicy: PrefetchPolicy.PrefetchSinglePage, + state: state ?? 
new CrossFeedRangeState( + new FeedRangeState[] + { + new FeedRangeState(FeedRangeEpk.FullRange, ReadFeedState.Beginning()) + })), + NoOpTrace.Singleton, + cancellationToken: default); + + return Task.FromResult(enumerator); + } + + public override IReadOnlyList GetRecordsFromPage(CrossFeedRangePage page) + { + return page.Page.GetRecords(); + } + + private sealed class PartitionRangePageAsyncEnumeratorComparer : IComparer> + { + public static readonly PartitionRangePageAsyncEnumeratorComparer Singleton = new PartitionRangePageAsyncEnumeratorComparer(); + + public int Compare( + PartitionRangePageAsyncEnumerator partitionRangePageEnumerator1, + PartitionRangePageAsyncEnumerator partitionRangePageEnumerator2) + { + if (object.ReferenceEquals(partitionRangePageEnumerator1, partitionRangePageEnumerator2)) + { + return 0; + } + + // Either both don't have results or both do. + return string.CompareOrdinal( + ((FeedRangeEpk)partitionRangePageEnumerator1.FeedRangeState.FeedRange).Range.Min, + ((FeedRangeEpk)partitionRangePageEnumerator2.FeedRangeState.FeedRange).Range.Min); + } + } + } + } +} diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Pagination/FlakyDocumentContainer.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Pagination/FlakyDocumentContainer.cs index 132d575905..6ce08b615a 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Pagination/FlakyDocumentContainer.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Pagination/FlakyDocumentContainer.cs @@ -125,7 +125,8 @@ public async Task> MonadicReadFeedAsync( return TryCatch.FromResult( new ReadFeedPage( new MemoryStream(Encoding.UTF8.GetBytes("{\"Documents\": [], \"_count\": 0, \"_rid\": \"asdf\"}")), - requestCharge: 42, + requestCharge: 42, + itemCount: 0, activityId: Guid.NewGuid().ToString(), additionalHeaders: null, state: nonNullState)); @@ -168,12 +169,12 @@ public async Task> MonadicQueryAsync( documents: new List(), requestCharge: 42, activityId: Guid.NewGuid().ToString(), - responseLengthInBytes: "[]".Length, cosmosQueryExecutionInfo: default, distributionPlanSpec: default, disallowContinuationTokenMessage: default, additionalHeaders: default, - state: feedRangeState.State ?? StateForStartedButNoDocumentsReturned)); + state: feedRangeState.State ?? StateForStartedButNoDocumentsReturned, + streaming: default)); } Exception failure = await this.ShouldReturnFailure(); @@ -206,7 +207,8 @@ public async Task> MonadicChangeFeedAsync( return TryCatch.FromResult( new ChangeFeedSuccessPage( content: new MemoryStream(Encoding.UTF8.GetBytes("{\"Documents\": [], \"_count\": 0, \"_rid\": \"asdf\"}")), - requestCharge: 42, + requestCharge: 42, + itemCount: 0, activityId: Guid.NewGuid().ToString(), additionalHeaders: default, state: feedRangeState.State)); diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Pagination/InMemoryContainer.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Pagination/InMemoryContainer.cs index ad17a6bac6..bfcfb0c725 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Pagination/InMemoryContainer.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Pagination/InMemoryContainer.cs @@ -1,1664 +1,1666 @@ -//------------------------------------------------------------ -// Copyright (c) Microsoft Corporation. All rights reserved. 
-//------------------------------------------------------------ - -namespace Microsoft.Azure.Cosmos.Tests.Pagination -{ - using System; - using System.Collections; - using System.Collections.Generic; - using System.Collections.Immutable; - using System.IO; - using System.Linq; - using System.Reflection; - using System.Threading; - using System.Threading.Tasks; - using Microsoft.Azure.Cosmos; - using Microsoft.Azure.Cosmos.ChangeFeed.Pagination; - using Microsoft.Azure.Cosmos.CosmosElements; - using Microsoft.Azure.Cosmos.CosmosElements.Numbers; - using Microsoft.Azure.Cosmos.Json; - using Microsoft.Azure.Cosmos.Pagination; - using Microsoft.Azure.Cosmos.Query.Core; - using Microsoft.Azure.Cosmos.Query.Core.Monads; - using Microsoft.Azure.Cosmos.Query.Core.Parser; - using Microsoft.Azure.Cosmos.Query.Core.Pipeline.CrossPartition.OrderBy; - using Microsoft.Azure.Cosmos.Query.Core.Pipeline.Distinct; - using Microsoft.Azure.Cosmos.Query.Core.Pipeline.Pagination; - using Microsoft.Azure.Cosmos.ReadFeed.Pagination; - using Microsoft.Azure.Cosmos.Routing; - using Microsoft.Azure.Cosmos.Serialization.HybridRow; - using Microsoft.Azure.Cosmos.SqlObjects; - using Microsoft.Azure.Cosmos.Tests.Query.OfflineEngine; - using Microsoft.Azure.Cosmos.Tracing; - using Microsoft.Azure.Documents; - using static Microsoft.Azure.Cosmos.Query.Core.SqlQueryResumeFilter; - using ResourceIdentifier = Cosmos.Pagination.ResourceIdentifier; - using UInt128 = UInt128; - - // Collection useful for mocking requests and repartitioning (splits / merge). - internal class InMemoryContainer : IMonadicDocumentContainer - { - private readonly PartitionKeyDefinition partitionKeyDefinition; - private readonly Dictionary parentToChildMapping; - - private PartitionKeyHashRangeDictionary partitionedRecords; - private PartitionKeyHashRangeDictionary> partitionedChanges; - private Dictionary partitionKeyRangeIdToHashRange; - private Dictionary cachedPartitionKeyRangeIdToHashRange; - - public InMemoryContainer( - PartitionKeyDefinition partitionKeyDefinition) - { - this.partitionKeyDefinition = partitionKeyDefinition ?? 
throw new ArgumentNullException(nameof(partitionKeyDefinition)); - PartitionKeyHashRange fullRange = new PartitionKeyHashRange(startInclusive: null, endExclusive: new PartitionKeyHash(Cosmos.UInt128.MaxValue)); - PartitionKeyHashRanges partitionKeyHashRanges = PartitionKeyHashRanges.Create(new PartitionKeyHashRange[] { fullRange }); - this.partitionedRecords = new PartitionKeyHashRangeDictionary(partitionKeyHashRanges); - this.partitionedRecords[fullRange] = new Records(); - this.partitionedChanges = new PartitionKeyHashRangeDictionary>(partitionKeyHashRanges); - this.partitionedChanges[fullRange] = new List(); - this.partitionKeyRangeIdToHashRange = new Dictionary() - { - { 0, fullRange } - }; - this.cachedPartitionKeyRangeIdToHashRange = new Dictionary() - { - { 0, fullRange } - }; - this.parentToChildMapping = new Dictionary(); - } - - public Task>> MonadicGetFeedRangesAsync( - ITrace trace, - CancellationToken cancellationToken) => this.MonadicGetChildRangeAsync( - FeedRangeEpk.FullRange, - trace, - cancellationToken); - - public async Task>> MonadicGetChildRangeAsync( - FeedRangeInternal feedRange, - ITrace trace, - CancellationToken cancellationToken) - { - cancellationToken.ThrowIfCancellationRequested(); - - if (feedRange == null) - { - throw new ArgumentNullException(nameof(feedRange)); - } - - if (trace == null) - { - throw new ArgumentNullException(nameof(trace)); - } - - using (ITrace getChildRangesTrace = trace.StartChild(name: "Get Child Ranges", TraceComponent.Routing, TraceLevel.Info)) - { - FeedRangeEpk CreateRangeFromId(int id) - { - PartitionKeyHashRange hashRange = this.cachedPartitionKeyRangeIdToHashRange[id]; - return new FeedRangeEpk( - new Documents.Routing.Range( - min: hashRange.StartInclusive.HasValue ? hashRange.StartInclusive.Value.ToString() : string.Empty, - max: hashRange.EndExclusive.HasValue ? hashRange.EndExclusive.Value.ToString() : string.Empty, - isMinInclusive: true, - isMaxInclusive: false)); - } - - if (feedRange is FeedRangePartitionKey) - { - throw new ArgumentException("Can not get the child of a logical partition key"); - } - - if (feedRange.Equals(FeedRangeEpk.FullRange)) - { - List ranges = new List(); - foreach (int id in this.cachedPartitionKeyRangeIdToHashRange.Keys) - { - ranges.Add(CreateRangeFromId(id)); - } - - return TryCatch>.FromResult(ranges); - } - - if (feedRange is FeedRangeEpk feedRangeEpk) - { - // look for overlapping epk ranges. 
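For orientation, the overlap search that follows reduces to interval arithmetic on half-open [min, max) effective partition key ranges, where an empty string stands in for an unbounded endpoint. A minimal sketch under that assumption (illustrative names, not the SDK's TryGetOverlappingRange helper):

    static class EpkRangeOverlapSketch
    {
        // Minimal sketch: two half-open ranges [Min, Max) overlap when each one
        // starts before the other ends. An empty string models the unbounded
        // minimum/maximum, matching how FeedRangeEpk represents the full range.
        public static bool Overlaps((string Min, string Max) a, (string Min, string Max) b)
        {
            bool aStartsBeforeBEnds = b.Max.Length == 0 || string.CompareOrdinal(a.Min, b.Max) < 0;
            bool bStartsBeforeAEnds = a.Max.Length == 0 || string.CompareOrdinal(b.Min, a.Max) < 0;
            return aStartsBeforeBEnds && bStartsBeforeAEnds;
        }
    }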
- List overlappingRanges; - if (feedRangeEpk.Range.Min.Equals(FeedRangeEpk.FullRange.Range.Min) && feedRangeEpk.Range.Max.Equals(FeedRangeEpk.FullRange.Range.Max)) - { - overlappingRanges = this.cachedPartitionKeyRangeIdToHashRange.Select(kvp => CreateRangeFromId(kvp.Key)).ToList(); - } - else - { - overlappingRanges = new List(); - PartitionKeyHashRange userRange = FeedRangeEpkToHashRange(feedRangeEpk); - foreach (PartitionKeyHashRange systemRange in this.cachedPartitionKeyRangeIdToHashRange.Values) - { - if (userRange.TryGetOverlappingRange(systemRange, out PartitionKeyHashRange overlappingRange)) - { - overlappingRanges.Add(HashRangeToFeedRangeEpk(overlappingRange)); - } - } - } - - if (overlappingRanges.Count == 0) - { - return TryCatch>.FromException( - new KeyNotFoundException( - $"PartitionKeyRangeId: {feedRangeEpk} does not exist.")); - } - - return TryCatch>.FromResult(overlappingRanges); - } - - if (!(feedRange is FeedRangePartitionKeyRange feedRangePartitionKeyRange)) - { - throw new InvalidOperationException("Expected feed range to be a partition key range at this point."); - } - - if (!int.TryParse(feedRangePartitionKeyRange.PartitionKeyRangeId, out int partitionKeyRangeId)) - { - return TryCatch>.FromException( - new FormatException( - $"PartitionKeyRangeId: {feedRangePartitionKeyRange.PartitionKeyRangeId} is not an integer.")); - } - - if (!this.parentToChildMapping.TryGetValue(partitionKeyRangeId, out (int left, int right) children)) - { - // This range has no children (base case) - if (!this.cachedPartitionKeyRangeIdToHashRange.TryGetValue(partitionKeyRangeId, out PartitionKeyHashRange hashRange)) - { - return TryCatch>.FromException( - new KeyNotFoundException( - $"PartitionKeyRangeId: {partitionKeyRangeId} does not exist.")); - } - - List singleRange = new List() - { - CreateRangeFromId(partitionKeyRangeId), - }; - - return TryCatch>.FromResult(singleRange); - } - - // Recurse on the left and right child. 
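The recursion referenced in the comment above walks the parentToChildMapping that splits build up: each split records a (left, right) child pair for the parent range id, so resolving an id means gathering all leaves beneath it. A simplified sketch of that walk, with illustrative names and without the TryCatch/trace plumbing the mock threads through:

    using System.Collections.Generic;

    static class SplitLineageSketch
    {
        // Collect every current leaf range id under a (possibly split) range id.
        public static IEnumerable<int> ResolveLeafRangeIds(
            int rangeId,
            IReadOnlyDictionary<int, (int Left, int Right)> parentToChildren)
        {
            if (!parentToChildren.TryGetValue(rangeId, out (int Left, int Right) children))
            {
                yield return rangeId; // base case: this range was never split
                yield break;
            }

            foreach (int leaf in ResolveLeafRangeIds(children.Left, parentToChildren))
            {
                yield return leaf;
            }

            foreach (int leaf in ResolveLeafRangeIds(children.Right, parentToChildren))
            {
                yield return leaf;
            }
        }
    }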
- FeedRangeInternal left = new FeedRangePartitionKeyRange(children.left.ToString()); - FeedRangeInternal right = new FeedRangePartitionKeyRange(children.right.ToString()); - - TryCatch> tryGetLeftRanges = await this.MonadicGetChildRangeAsync(left, trace, cancellationToken); - if (tryGetLeftRanges.Failed) - { - return tryGetLeftRanges; - } - - TryCatch> tryGetRightRanges = await this.MonadicGetChildRangeAsync(right, trace, cancellationToken); - if (tryGetRightRanges.Failed) - { - return tryGetRightRanges; - } - - List recursiveOverlappingRanges = tryGetLeftRanges.Result.Concat(tryGetRightRanges.Result).ToList(); - return TryCatch>.FromResult(recursiveOverlappingRanges); - } - } - - public Task MonadicRefreshProviderAsync( - ITrace trace, - CancellationToken cancellationToken) - { - using (ITrace refreshProviderTrace = trace.StartChild("Refreshing FeedRangeProvider", TraceComponent.Routing, TraceLevel.Info)) - { - this.cachedPartitionKeyRangeIdToHashRange = new Dictionary(this.partitionKeyRangeIdToHashRange); - return Task.FromResult(TryCatch.FromResult()); - } - } - - public Task> MonadicCreateItemAsync( - CosmosObject payload, - CancellationToken cancellationToken) - { - cancellationToken.ThrowIfCancellationRequested(); - - if (payload == null) - { - throw new ArgumentNullException(nameof(payload)); - } - - PartitionKeyHash partitionKeyHash = GetHashFromPayload(payload, this.partitionKeyDefinition); - if (!this.partitionedRecords.TryGetValue(partitionKeyHash, out Records records)) - { - records = new Records(); - this.partitionedRecords[partitionKeyHash] = records; - } - - int? pkrangeid = null; - foreach (KeyValuePair kvp in this.partitionKeyRangeIdToHashRange) - { - if (kvp.Value.Contains(partitionKeyHash)) - { - pkrangeid = kvp.Key; - } - } - - if (!pkrangeid.HasValue) - { - throw new InvalidOperationException(); - } - - Record recordAdded = records.Add(pkrangeid.Value, payload); - - if (!this.partitionedChanges.TryGetValue(partitionKeyHash, out List changes)) - { - changes = new List(); - this.partitionedChanges[partitionKeyHash] = changes; - } - - ulong maxLogicalSequenceNumber = changes.Count == 0 ? 0 : changes.Select(change => change.LogicalSequenceNumber).Max(); - - Change change = new Change( - recordAdded, - partitionKeyRangeId: (ulong)pkrangeid.Value, - logicalSequenceNumber: maxLogicalSequenceNumber + 1); - - changes.Add(change); - return Task.FromResult(TryCatch.FromResult(recordAdded)); - } - - public Task> MonadicReadItemAsync( - CosmosElement partitionKey, - string identifier, - CancellationToken cancellationToken) - { - cancellationToken.ThrowIfCancellationRequested(); - - static Task> CreateNotFoundException(CosmosElement partitionKey, string identifer) - { - return Task.FromResult( - TryCatch.FromException( - new CosmosException( - message: $"Document with partitionKey: {partitionKey?.ToString() ?? 
"UNDEFINED"} and id: {identifer} not found.", - statusCode: System.Net.HttpStatusCode.NotFound, - subStatusCode: default, - activityId: Guid.NewGuid().ToString(), - requestCharge: 42))); - } - - PartitionKeyHash partitionKeyHash = GetHashFromPartitionKeys( - new List { partitionKey }, - this.partitionKeyDefinition); - - if (!this.partitionedRecords.TryGetValue(partitionKeyHash, out Records records)) - { - return CreateNotFoundException(partitionKey, identifier); - } - - foreach (Record candidate in records) - { - bool identifierMatches = candidate.Identifier == identifier; - - CosmosElement candidatePartitionKey = GetPartitionKeyFromPayload( - candidate.Payload, - this.partitionKeyDefinition); - - bool partitionKeyMatches; - if (candidatePartitionKey is null && partitionKey is null) - { - partitionKeyMatches = true; - } - else if ((candidatePartitionKey != null) && (partitionKey != null)) - { - partitionKeyMatches = candidatePartitionKey.Equals(partitionKey); - } - else - { - partitionKeyMatches = false; - } - - if (identifierMatches && partitionKeyMatches) - { - return Task.FromResult(TryCatch.FromResult(candidate)); - } - } - - return CreateNotFoundException(partitionKey, identifier); - } - - public Task> MonadicReadFeedAsync( - FeedRangeState feedRangeState, - ReadFeedPaginationOptions readFeedPaginationOptions, - ITrace trace, - CancellationToken cancellationToken) - { - cancellationToken.ThrowIfCancellationRequested(); - - readFeedPaginationOptions ??= ReadFeedPaginationOptions.Default; - - using (ITrace readFeed = trace.StartChild("Read Feed Transport", TraceComponent.Transport, TraceLevel.Info)) - { - TryCatch monadicPartitionKeyRangeId = this.MonadicGetPartitionKeyRangeIdFromFeedRange(feedRangeState.FeedRange); - if (monadicPartitionKeyRangeId.Failed) - { - return Task.FromResult(TryCatch.FromException(monadicPartitionKeyRangeId.Exception)); - } - - int partitionKeyRangeId = monadicPartitionKeyRangeId.Result; - - if (!this.partitionKeyRangeIdToHashRange.TryGetValue( - partitionKeyRangeId, - out PartitionKeyHashRange range)) - { - return Task.FromResult( - TryCatch.FromException( - new CosmosException( - message: $"PartitionKeyRangeId {partitionKeyRangeId} is gone", - statusCode: System.Net.HttpStatusCode.Gone, - subStatusCode: (int)SubStatusCodes.PartitionKeyRangeGone, - activityId: Guid.NewGuid().ToString(), - requestCharge: 42))); - } - - if (!this.partitionedRecords.TryGetValue(range, out Records records)) - { - throw new InvalidOperationException("failed to find the range."); - } - - (ulong pkrangeId, ulong documentIndex) rangeIdAndIndex; - if (feedRangeState.State is ReadFeedBeginningState) - { - rangeIdAndIndex = (0, 0); - } - else if (feedRangeState.State is ReadFeedContinuationState readFeedContinuationState) - { - ResourceIdentifier resourceIdentifier = ResourceIdentifier.Parse(((CosmosString)readFeedContinuationState.ContinuationToken).Value); - rangeIdAndIndex = (resourceIdentifier.Database, resourceIdentifier.Document); - } - else - { - throw new InvalidOperationException("Unknown read feed state"); - } - - List page = records - .Where((record) => - { - if (!IsRecordWithinFeedRange(record, feedRangeState.FeedRange, this.partitionKeyDefinition)) - { - return false; - } - - // We do a filter on a composite index here - int pkRangeIdCompare = record.ResourceIdentifier.Database.CompareTo((uint)rangeIdAndIndex.pkrangeId); - if (pkRangeIdCompare < 0) - { - return false; - } - else if (pkRangeIdCompare > 0) - { - return true; - } - else // pkRangeIdCompare == 0 - { - return 
record.ResourceIdentifier.Document > rangeIdAndIndex.documentIndex; - } - }) - .Take(readFeedPaginationOptions.PageSizeLimit.GetValueOrDefault(int.MaxValue)) - .ToList(); - - List documents = new List(); - foreach (Record record in page) - { - CosmosObject document = ConvertRecordToCosmosElement(record); - documents.Add(CosmosObject.Create(document)); - } - - ReadFeedState continuationState; - if (documents.Count == 0) - { - continuationState = null; - } - else - { - ResourceId resourceIdentifier = page.Last().ResourceIdentifier; - CosmosString continuationToken = CosmosString.Create(resourceIdentifier.ToString()); - continuationState = ReadFeedState.Continuation(continuationToken); - } - - CosmosArray cosmosDocuments = CosmosArray.Create(documents); - CosmosNumber cosmosCount = CosmosNumber64.Create(cosmosDocuments.Count); - CosmosString cosmosRid = CosmosString.Create("AYIMAMmFOw8YAAAAAAAAAA=="); - - Dictionary responseDictionary = new Dictionary() - { - { "Documents", cosmosDocuments }, - { "_count", cosmosCount }, - { "_rid", cosmosRid }, - }; - CosmosObject cosmosResponse = CosmosObject.Create(responseDictionary); - IJsonWriter jsonWriter = Cosmos.Json.JsonWriter.Create(JsonSerializationFormat.Text); - cosmosResponse.WriteTo(jsonWriter); - byte[] result = jsonWriter.GetResult().ToArray(); - MemoryStream responseStream = new MemoryStream(result); - - ReadFeedPage readFeedPage = new ReadFeedPage( - responseStream, - requestCharge: 42, - activityId: Guid.NewGuid().ToString(), - additionalHeaders: new Dictionary() - { - { "test-header", "test-value" } - }, - continuationState); - - return Task.FromResult(TryCatch.FromResult(readFeedPage)); - } - } - - public virtual Task> MonadicQueryAsync( - SqlQuerySpec sqlQuerySpec, - FeedRangeState feedRangeState, - QueryPaginationOptions queryPaginationOptions, - ITrace trace, - CancellationToken cancellationToken) - { - if (cancellationToken.IsCancellationRequested) - { - return Task.FromCanceled>(cancellationToken); - } - if (sqlQuerySpec == null) - { - throw new ArgumentNullException(nameof(sqlQuerySpec)); - } - - using (ITrace childTrace = trace.StartChild("Query Transport", TraceComponent.Transport, TraceLevel.Info)) - { - TryCatch monadicPartitionKeyRangeId = this.MonadicGetPartitionKeyRangeIdFromFeedRange(feedRangeState.FeedRange); - if (monadicPartitionKeyRangeId.Failed) - { - return Task.FromResult(TryCatch.FromException(monadicPartitionKeyRangeId.Exception)); - } - - int partitionKeyRangeId = monadicPartitionKeyRangeId.Result; - - if (!this.partitionKeyRangeIdToHashRange.TryGetValue( - partitionKeyRangeId, - out PartitionKeyHashRange range)) - { - return Task.FromResult(TryCatch.FromException( - new CosmosException( - message: $"PartitionKeyRangeId {partitionKeyRangeId} is gone", - statusCode: System.Net.HttpStatusCode.Gone, - subStatusCode: (int)SubStatusCodes.PartitionKeyRangeGone, - activityId: Guid.NewGuid().ToString(), - requestCharge: 42))); - } - - if (!this.partitionedRecords.TryGetValue(range, out Records records)) - { - throw new InvalidOperationException("failed to find the range."); - } - - List documents = new List(); - foreach (Record record in records.Where(r => IsRecordWithinFeedRange(r, feedRangeState.FeedRange, this.partitionKeyDefinition))) - { - CosmosObject document = ConvertRecordToCosmosElement(record); - documents.Add(CosmosObject.Create(document)); - } - - TryCatch monadicParse = SqlQueryParser.Monadic.Parse(sqlQuerySpec.QueryText); - if (monadicParse.Failed) - { - return 
Task.FromResult(TryCatch.FromException(monadicParse.Exception)); - } - - SqlQuery sqlQuery = monadicParse.Result; - if ((sqlQuery.OrderByClause != null) && (feedRangeState.State != null) && (sqlQuerySpec.ResumeFilter == null)) - { - // This is a hack. - // If the query is an ORDER BY query then we need to seek to the resume term. - // Since I don't want to port over the proper logic from the backend I will just inject a filter. - // For now I am only handling the single order by item case - if (sqlQuery.OrderByClause.OrderByItems.Length != 1) - { - throw new NotImplementedException("Can only support a single order by column"); - } - - SqlOrderByItem orderByItem = sqlQuery.OrderByClause.OrderByItems[0]; - CosmosObject parsedContinuationToken = CosmosObject.Parse(((CosmosString)feedRangeState.State.Value).Value); - SqlBinaryScalarExpression resumeFilter = SqlBinaryScalarExpression.Create( - orderByItem.IsDescending ? SqlBinaryScalarOperatorKind.LessThan : SqlBinaryScalarOperatorKind.GreaterThan, - orderByItem.Expression, - parsedContinuationToken["orderByItem"].Accept(CosmosElementToSqlScalarExpressionVisitor.Singleton)); - - SqlWhereClause modifiedWhereClause = sqlQuery.WhereClause.FilterExpression == null - ? SqlWhereClause.Create(resumeFilter) - : SqlWhereClause.Create( - SqlBinaryScalarExpression.Create( - SqlBinaryScalarOperatorKind.And, - sqlQuery.WhereClause.FilterExpression, - resumeFilter)); - - sqlQuery = SqlQuery.Create( - sqlQuery.SelectClause, - sqlQuery.FromClause, - modifiedWhereClause, - sqlQuery.GroupByClause, - sqlQuery.OrderByClause, - sqlQuery.OffsetLimitClause); - - // We still need to handle duplicate values and break the tie with the rid - // But since all the values are unique for our testing purposes we can ignore this for now. - } - IEnumerable queryResults = SqlInterpreter.ExecuteQuery(documents, sqlQuery); - IEnumerable queryPageResults = queryResults; - - // If the resume value is passed in query spec, filter out the results that has order by item value smaller than resume values - if (sqlQuerySpec.ResumeFilter != null) - { - if (sqlQuery.OrderByClause.OrderByItems.Length != 1) - { - throw new NotImplementedException("Can only support a single order by column"); - } - - SqlOrderByItem orderByItem = sqlQuery.OrderByClause.OrderByItems[0]; - IEnumerator queryResultEnumerator = queryPageResults.GetEnumerator(); - - int skipCount = 0; - while(queryResultEnumerator.MoveNext()) - { - CosmosObject document = (CosmosObject)queryResultEnumerator.Current; - CosmosElement orderByValue = ((CosmosObject)((CosmosArray)document["orderByItems"])[0])["item"]; - - int sortOrderCompare = sqlQuerySpec.ResumeFilter.ResumeValues[0].CompareTo(orderByValue); - - if (sortOrderCompare != 0) - { - sortOrderCompare = orderByItem.IsDescending ? -sortOrderCompare : sortOrderCompare; - } - - if (sortOrderCompare < 0) - { - // We might have passed the item due to deletions and filters. - break; - } - - if (sortOrderCompare >= 0) - { - // This document does not match the sort order, so skip it. - skipCount++; - } - } - - queryPageResults = queryPageResults.Skip(skipCount); - - // NOTE: We still need to handle duplicate values and break the tie with the rid - // But since all the values are unique for our testing purposes we can ignore this for now. 
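The NOTE above concerns resuming an ORDER BY query when order-by values can repeat; the mock skips the problem because its test data is unique. Illustrative only, one way such a tie-break could use the document _rid as a secondary key (hypothetical helper, not the service's actual resume logic):

    using System;

    static class OrderByResumeSketch
    {
        // Return true when a document sorts strictly after the resume point,
        // falling back to the _rid when the order-by values are equal so the
        // already-returned document is excluded.
        public static bool ComesAfterResumePoint(
            IComparable orderByValue,
            string documentRid,
            IComparable resumeOrderByValue,
            string resumeRid,
            bool descending)
        {
            int byValue = orderByValue.CompareTo(resumeOrderByValue);
            if (descending)
            {
                byValue = -byValue;
            }

            if (byValue != 0)
            {
                return byValue > 0;
            }

            // Equal order-by values: break the tie with the rid.
            return string.CompareOrdinal(documentRid, resumeRid) > 0;
        }
    }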
- } - - // Filter for the continuation token - string continuationResourceId; - int continuationSkipCount; - - if ((sqlQuery.OrderByClause == null) && (feedRangeState.State != null)) - { - CosmosObject parsedContinuationToken = CosmosObject.Parse(((CosmosString)feedRangeState.State.Value).Value); - continuationResourceId = ((CosmosString)parsedContinuationToken["resourceId"]).Value; - continuationSkipCount = (int)Number64.ToLong(((CosmosNumber64)parsedContinuationToken["skipCount"]).Value); - - ResourceIdentifier continuationParsedResourceId = ResourceIdentifier.Parse(continuationResourceId); - queryPageResults = queryPageResults.Where(c => - { - ResourceId documentResourceId = ResourceId.Parse(((CosmosString)((CosmosObject)c)["_rid"]).Value); - // Perform a composite filter on pkrange id and document index - int pkRangeIdCompare = documentResourceId.Database.CompareTo(continuationParsedResourceId.Database); - if (pkRangeIdCompare < 0) - { - return false; - } - else if (pkRangeIdCompare > 0) - { - return true; - } - else // pkRangeIdCompare == 0 - { - int documentCompare = documentResourceId.Document.CompareTo(continuationParsedResourceId.Document); - - // If we have a skip count, then we can't skip over the rid we last saw, since - // there are documents with the same rid that we need to skip over. - return continuationSkipCount == 0 ? documentCompare > 0 : documentCompare >= 0; - } - }); - - for (int i = 0; i < continuationSkipCount; i++) - { - if (queryPageResults.FirstOrDefault() is CosmosObject firstDocument) - { - string currentResourceId = ((CosmosString)firstDocument["_rid"]).Value; - if (currentResourceId == continuationResourceId) - { - queryPageResults = queryPageResults.Skip(1); - } - } - } - } - else - { - continuationResourceId = null; - continuationSkipCount = 0; - } - - queryPageResults = queryPageResults.Take((queryPaginationOptions ?? 
QueryPaginationOptions.Default).PageSizeLimit.GetValueOrDefault(int.MaxValue)); - List queryPageResultList = queryPageResults.ToList(); - QueryState queryState; - if (queryPageResultList.LastOrDefault() is CosmosObject lastDocument - && lastDocument.TryGetValue("_rid", out CosmosString resourceId)) - { - string currentResourceId = resourceId.Value; - int currentSkipCount = queryPageResultList - .Where(document => ((CosmosString)((CosmosObject)document)["_rid"]).Value == currentResourceId) - .Count(); - if (currentResourceId == continuationResourceId) - { - currentSkipCount += continuationSkipCount; - } - - Dictionary queryStateDictionary = new Dictionary() - { - { "resourceId", CosmosString.Create(currentResourceId) }, - { "skipCount", CosmosNumber64.Create(currentSkipCount) }, - }; - - if (sqlQuery.OrderByClause != null) - { - SqlOrderByItem orderByItem = sqlQuery.OrderByClause.OrderByItems[0]; - string propertyName = ((SqlPropertyRefScalarExpression)orderByItem.Expression).Identifier.Value; - queryStateDictionary["orderByItem"] = ((CosmosObject)lastDocument["payload"])[propertyName]; - } - - CosmosObject queryStateValue = CosmosObject.Create(queryStateDictionary); - - queryState = new QueryState(CosmosString.Create(queryStateValue.ToString())); - } - else - { - queryState = default; - } - - ImmutableDictionary.Builder additionalHeaders = ImmutableDictionary.CreateBuilder(); - additionalHeaders.Add("x-ms-documentdb-partitionkeyrangeid", "0"); - additionalHeaders.Add("x-ms-test-header", "true"); - - return Task.FromResult( - TryCatch.FromResult( - new QueryPage( - queryPageResultList, - requestCharge: 42, - activityId: Guid.NewGuid().ToString(), - responseLengthInBytes: 1337, - cosmosQueryExecutionInfo: default, - distributionPlanSpec: default, - disallowContinuationTokenMessage: default, - additionalHeaders: additionalHeaders.ToImmutable(), - state: queryState))); - } - } - - public Task> MonadicChangeFeedAsync( - FeedRangeState feedRangeState, - ChangeFeedPaginationOptions changeFeedPaginationOptions, - ITrace trace, - CancellationToken cancellationToken) - { - cancellationToken.ThrowIfCancellationRequested(); - - using (ITrace childTrace = trace.StartChild("Change Feed Transport", TraceComponent.Transport, TraceLevel.Info)) - { - TryCatch monadicPartitionKeyRangeId = this.MonadicGetPartitionKeyRangeIdFromFeedRange(feedRangeState.FeedRange); - if (monadicPartitionKeyRangeId.Failed) - { - return Task.FromResult(TryCatch.FromException(monadicPartitionKeyRangeId.Exception)); - } - - int partitionKeyRangeId = monadicPartitionKeyRangeId.Result; - - if (!this.partitionKeyRangeIdToHashRange.TryGetValue( - partitionKeyRangeId, - out PartitionKeyHashRange range)) - { - return Task.FromResult(TryCatch.FromException( - new CosmosException( - message: $"PartitionKeyRangeId {partitionKeyRangeId} is gone", - statusCode: System.Net.HttpStatusCode.Gone, - subStatusCode: (int)SubStatusCodes.PartitionKeyRangeGone, - activityId: Guid.NewGuid().ToString(), - requestCharge: 42))); - } - - if (!this.partitionedChanges.TryGetValue(range, out List changes)) - { - throw new InvalidOperationException("failed to find the range."); - } - - List filteredChanges = changes - .Where(change => IsRecordWithinFeedRange(change.Record, feedRangeState.FeedRange, this.partitionKeyDefinition)) - .Where(change => feedRangeState.State.Accept(ChangeFeedPredicate.Singleton, change)) - .Take((changeFeedPaginationOptions ?? 
ChangeFeedPaginationOptions.Default).PageSizeLimit.GetValueOrDefault(int.MaxValue)) - .ToList(); - - if (filteredChanges.Count == 0) - { - ChangeFeedState notModifiedResponseState = new ChangeFeedStateTime(DateTime.UtcNow); - return Task.FromResult( - TryCatch.FromResult( - new ChangeFeedNotModifiedPage( - requestCharge: 42, - activityId: Guid.NewGuid().ToString(), - additionalHeaders: default, - notModifiedResponseState))); - } - - Change lastChange = filteredChanges.Last(); - CosmosObject continuationToken = CosmosObject.Create( - new Dictionary() - { - { "PkRangeId", CosmosNumber64.Create(lastChange.PartitionKeyRangeId) }, - { "LSN", CosmosNumber64.Create(lastChange.LogicalSequenceNumber) } - }); - - ChangeFeedState responseState = ChangeFeedState.Continuation(continuationToken); - - List documents = new List(); - foreach (Change change in filteredChanges) - { - CosmosObject document = ConvertRecordToCosmosElement(change.Record); - documents.Add(CosmosObject.Create(document)); - } - - CosmosArray cosmosDocuments = CosmosArray.Create(documents); - CosmosNumber cosmosCount = CosmosNumber64.Create(cosmosDocuments.Count); - CosmosString cosmosRid = CosmosString.Create("AYIMAMmFOw8YAAAAAAAAAA=="); - - Dictionary responseDictionary = new Dictionary() - { - { "Documents", cosmosDocuments }, - { "_count", cosmosCount }, - { "_rid", cosmosRid }, - }; - CosmosObject cosmosResponse = CosmosObject.Create(responseDictionary); - IJsonWriter jsonWriter = Cosmos.Json.JsonWriter.Create(JsonSerializationFormat.Text); - cosmosResponse.WriteTo(jsonWriter); - byte[] result = jsonWriter.GetResult().ToArray(); - MemoryStream responseStream = new MemoryStream(result); - - return Task.FromResult( - TryCatch.FromResult( - new ChangeFeedSuccessPage( - responseStream, - requestCharge: 42, - activityId: Guid.NewGuid().ToString(), - additionalHeaders: default, - responseState))); - } - } - - public Task MonadicSplitAsync( - FeedRangeInternal feedRange, - CancellationToken cancellationToken) - { - cancellationToken.ThrowIfCancellationRequested(); - - if (feedRange == null) - { - throw new ArgumentNullException(nameof(feedRange)); - } - - TryCatch monadicPartitionKeyRangeId = this.MonadicGetPartitionKeyRangeIdFromFeedRange(feedRange); - if (monadicPartitionKeyRangeId.Failed) - { - return Task.FromResult(TryCatch.FromException(monadicPartitionKeyRangeId.Exception)); - } - - int partitionKeyRangeId = monadicPartitionKeyRangeId.Result; - - // Get the current range and records - if (!this.partitionKeyRangeIdToHashRange.TryGetValue( - partitionKeyRangeId, - out PartitionKeyHashRange parentRange)) - { - return Task.FromResult( - TryCatch.FromException( - new CosmosException( - message: $"PartitionKeyRangeId {partitionKeyRangeId} is gone", - statusCode: System.Net.HttpStatusCode.Gone, - subStatusCode: (int)SubStatusCodes.PartitionKeyRangeGone, - activityId: Guid.NewGuid().ToString(), - requestCharge: 42))); - } - - if (!this.partitionedRecords.TryGetValue(parentRange, out Records parentRecords)) - { - throw new InvalidOperationException("failed to find the range."); - } - - if (!this.partitionedChanges.TryGetValue(parentRange, out List parentChanges)) - { - throw new InvalidOperationException("failed to find the range."); - } - - // Split the range space - PartitionKeyHashRanges partitionKeyHashRanges; - if (this.partitionKeyDefinition.Kind == PartitionKind.MultiHash && - this.partitionKeyDefinition.Paths.Count > 1) - { - //For MultiHash, to help with testing we will split using the median partition key among documents. 
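The idea behind the MultiHash split comment above is to sort the hashes of the documents currently in the parent range and cut at the median, so both child ranges end up with data. A purely illustrative sketch; the container's real helper, ComputeMedianSplitPointAmongDocumentsInPKRange, appears later in this file:

    using System;
    using System.Collections.Generic;
    using System.Linq;

    static class MedianSplitSketch
    {
        // Pick the median element of a non-empty collection as the split point.
        public static T MedianOf<T>(IReadOnlyCollection<T> hashes) where T : IComparable<T>
        {
            if (hashes.Count == 0)
            {
                throw new InvalidOperationException("Cannot pick a median split point for an empty range.");
            }

            List<T> sorted = hashes.OrderBy(h => h).ToList();
            return sorted[sorted.Count / 2];
        }
    }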
- PartitionKeyHash midPoint = this.ComputeMedianSplitPointAmongDocumentsInPKRange(parentRange); - partitionKeyHashRanges = PartitionKeyHashRangeSplitterAndMerger.SplitRange(parentRange, midPoint); - } - else - { - partitionKeyHashRanges = PartitionKeyHashRangeSplitterAndMerger.SplitRange( - parentRange, - rangeCount: 2); - } - - // Update the partition routing map - int maxPartitionKeyRangeId = this.partitionKeyRangeIdToHashRange.Keys.Max(); - this.parentToChildMapping[partitionKeyRangeId] = (maxPartitionKeyRangeId + 1, maxPartitionKeyRangeId + 2); - Dictionary newPartitionKeyRangeIdToHashRange = new Dictionary() - { - { maxPartitionKeyRangeId + 1, partitionKeyHashRanges.First() }, - { maxPartitionKeyRangeId + 2, partitionKeyHashRanges.Last() }, - }; - - foreach (KeyValuePair kvp in this.partitionKeyRangeIdToHashRange) - { - int oldRangeId = kvp.Key; - PartitionKeyHashRange oldRange = kvp.Value; - if (!oldRange.Equals(parentRange)) - { - newPartitionKeyRangeIdToHashRange[oldRangeId] = oldRange; - } - } - - // Copy over the partitioned records (minus the parent range) - PartitionKeyHashRangeDictionary newPartitionedRecords = new PartitionKeyHashRangeDictionary( - PartitionKeyHashRanges.Create(newPartitionKeyRangeIdToHashRange.Values)); - - newPartitionedRecords[partitionKeyHashRanges.First()] = new Records(); - newPartitionedRecords[partitionKeyHashRanges.Last()] = new Records(); - - foreach (PartitionKeyHashRange range in this.partitionKeyRangeIdToHashRange.Values) - { - if (!range.Equals(parentRange)) - { - newPartitionedRecords[range] = this.partitionedRecords[range]; - } - } - - PartitionKeyHashRangeDictionary> newPartitionedChanges = new PartitionKeyHashRangeDictionary>( - PartitionKeyHashRanges.Create(newPartitionKeyRangeIdToHashRange.Values)); - - newPartitionedChanges[partitionKeyHashRanges.First()] = new List(); - newPartitionedChanges[partitionKeyHashRanges.Last()] = new List(); - - foreach (PartitionKeyHashRange range in this.partitionKeyRangeIdToHashRange.Values) - { - if (!range.Equals(parentRange)) - { - newPartitionedChanges[range] = this.partitionedChanges[range]; - } - } - - this.partitionedRecords = newPartitionedRecords; - this.partitionedChanges = newPartitionedChanges; - this.partitionKeyRangeIdToHashRange = newPartitionKeyRangeIdToHashRange; - - // Rehash the records in the parent range - foreach (Record record in parentRecords) - { - PartitionKeyHash partitionKeyHash = GetHashFromPayload(record.Payload, this.partitionKeyDefinition); - if (!this.partitionedRecords.TryGetValue(partitionKeyHash, out Records records)) - { - records = new Records(); - this.partitionedRecords[partitionKeyHash] = records; - } - - records.Add(record); - } - - // Rehash the changes in the parent range - foreach (Change change in parentChanges) - { - PartitionKeyHash partitionKeyHash = GetHashFromPayload(change.Record.Payload, this.partitionKeyDefinition); - if (!this.partitionedChanges.TryGetValue(partitionKeyHash, out List changes)) - { - changes = new List(); - this.partitionedChanges[partitionKeyHash] = changes; - } - - changes.Add(change); - } - - return Task.FromResult(TryCatch.FromResult()); - } - - public Task MonadicMergeAsync( - FeedRangeInternal feedRange1, - FeedRangeInternal feedRange2, - CancellationToken cancellationToken) - { - cancellationToken.ThrowIfCancellationRequested(); - - if (feedRange1 == null) - { - throw new ArgumentNullException(nameof(feedRange1)); - } - - if (feedRange2 == null) - { - throw new ArgumentNullException(nameof(feedRange2)); - } - - TryCatch 
monadicPartitionKeyRangeId1 = this.MonadicGetPartitionKeyRangeIdFromFeedRange(feedRange1); - if (monadicPartitionKeyRangeId1.Failed) - { - return Task.FromResult(TryCatch.FromException(monadicPartitionKeyRangeId1.Exception)); - } - - int sourceRangeId1 = monadicPartitionKeyRangeId1.Result; - - TryCatch monadicPartitionKeyRangeId2 = this.MonadicGetPartitionKeyRangeIdFromFeedRange(feedRange2); - if (monadicPartitionKeyRangeId2.Failed) - { - return Task.FromResult(TryCatch.FromException(monadicPartitionKeyRangeId2.Exception)); - } - - int sourceRangeId2 = monadicPartitionKeyRangeId2.Result; - - // Get the range and records - if (!this.partitionKeyRangeIdToHashRange.TryGetValue( - sourceRangeId1, - out PartitionKeyHashRange sourceHashRange1)) - { - return Task.FromResult( - TryCatch.FromException( - new CosmosException( - message: $"PartitionKeyRangeId {sourceRangeId1} is gone", - statusCode: System.Net.HttpStatusCode.Gone, - subStatusCode: (int)SubStatusCodes.PartitionKeyRangeGone, - activityId: Guid.NewGuid().ToString(), - requestCharge: 42))); - } - - if (!this.partitionedRecords.TryGetValue(sourceHashRange1, out Records sourceRecords1)) - { - throw new InvalidOperationException("failed to find the range."); - } - - if (!this.partitionedChanges.TryGetValue(sourceHashRange1, out List sourceChanges1)) - { - throw new InvalidOperationException("failed to find the range."); - } - - if (!this.partitionKeyRangeIdToHashRange.TryGetValue( - sourceRangeId2, - out PartitionKeyHashRange sourceHashRange2)) - { - return Task.FromResult( - TryCatch.FromException( - new CosmosException( - message: $"PartitionKeyRangeId {sourceRangeId2} is gone", - statusCode: System.Net.HttpStatusCode.Gone, - subStatusCode: (int)SubStatusCodes.PartitionKeyRangeGone, - activityId: Guid.NewGuid().ToString(), - requestCharge: 42))); - } - - if (!this.partitionedRecords.TryGetValue(sourceHashRange2, out Records sourceRecords2)) - { - throw new InvalidOperationException("failed to find the range."); - } - - if (!this.partitionedChanges.TryGetValue(sourceHashRange2, out List sourceChanges2)) - { - throw new InvalidOperationException("failed to find the range."); - } - - // Merge the range space - TryCatch monadicRanges = PartitionKeyHashRanges.Monadic.Create(new List() - { - sourceHashRange1, - sourceHashRange2 - }); - - if (monadicRanges.Failed) - { - return Task.FromResult(TryCatch.FromException(monadicRanges.Exception)); - } - - PartitionKeyHashRange mergedHashRange = PartitionKeyHashRangeSplitterAndMerger.MergeRanges( - monadicRanges.Result); - - // Update the partition routing map - int maxPartitionKeyRangeId = this.partitionKeyRangeIdToHashRange.Keys.Max(); - Dictionary newPartitionKeyRangeIdToHashRange = new Dictionary() - { - { maxPartitionKeyRangeId + 1, mergedHashRange }, - }; - - foreach (KeyValuePair kvp in this.partitionKeyRangeIdToHashRange) - { - int oldRangeId = kvp.Key; - PartitionKeyHashRange oldRange = kvp.Value; - if (!(oldRange.Equals(sourceHashRange1) || oldRange.Equals(sourceHashRange2))) - { - newPartitionKeyRangeIdToHashRange[oldRangeId] = oldRange; - } - } - - // Copy over the partitioned records (minus the source ranges) - PartitionKeyHashRangeDictionary newPartitionedRecords = new PartitionKeyHashRangeDictionary( - PartitionKeyHashRanges.Create(newPartitionKeyRangeIdToHashRange.Values)); - - newPartitionedRecords[mergedHashRange] = new Records(); - - foreach (PartitionKeyHashRange range in this.partitionKeyRangeIdToHashRange.Values) - { - if (!(range.Equals(sourceHashRange1) || 
range.Equals(sourceHashRange2))) - { - newPartitionedRecords[range] = this.partitionedRecords[range]; - } - } - - PartitionKeyHashRangeDictionary> newPartitionedChanges = new PartitionKeyHashRangeDictionary>( - PartitionKeyHashRanges.Create(newPartitionKeyRangeIdToHashRange.Values)); - - newPartitionedChanges[mergedHashRange] = new List(); - - foreach (PartitionKeyHashRange range in this.partitionKeyRangeIdToHashRange.Values) - { - if (!(range.Equals(sourceHashRange1) || range.Equals(sourceHashRange2))) - { - newPartitionedChanges[range] = this.partitionedChanges[range]; - } - } - - this.partitionedRecords = newPartitionedRecords; - this.partitionedChanges = newPartitionedChanges; - this.partitionKeyRangeIdToHashRange = newPartitionKeyRangeIdToHashRange; - - // Rehash the records in the source ranges - List combinedOrderedRecords = new List(); - foreach (Records sourceRecords in new Records[] { sourceRecords1, sourceRecords2 }) - { - combinedOrderedRecords.AddRange(sourceRecords); - } - - combinedOrderedRecords = combinedOrderedRecords - .OrderBy(record => record.ResourceIdentifier.Database) - .ThenBy(record => record.ResourceIdentifier.Document) - .ToList(); - - foreach (Record record in combinedOrderedRecords) - { - PartitionKeyHash partitionKeyHash = GetHashFromPayload(record.Payload, this.partitionKeyDefinition); - if (!this.partitionedRecords.TryGetValue(partitionKeyHash, out Records records)) - { - records = new Records(); - this.partitionedRecords[partitionKeyHash] = records; - } - - records.Add(record); - } - - // Rehash the changes in the parent range - List combinedOrderedChanges = new List(); - foreach (List sourceChanges in new List[] { sourceChanges1, sourceChanges2 }) - { - combinedOrderedChanges.AddRange(sourceChanges); - } - - combinedOrderedChanges = combinedOrderedChanges - .OrderBy(change => change.PartitionKeyRangeId) - .ThenBy(change => change.LogicalSequenceNumber) - .ToList(); - - foreach (Change change in combinedOrderedChanges) - { - PartitionKeyHash partitionKeyHash = GetHashFromPayload(change.Record.Payload, this.partitionKeyDefinition); - if (!this.partitionedChanges.TryGetValue(partitionKeyHash, out List changes)) - { - changes = new List(); - this.partitionedChanges[partitionKeyHash] = changes; - } - - changes.Add(change); - } - - return Task.FromResult(TryCatch.FromResult()); - } - - public IEnumerable PartitionKeyRangeIds => this.partitionKeyRangeIdToHashRange.Keys; - - private static PartitionKeyHash GetHashFromPayload( - CosmosObject payload, - PartitionKeyDefinition partitionKeyDefinition) - { - IList partitionKey = GetPartitionKeysFromPayload(payload, partitionKeyDefinition); - return GetHashFromPartitionKeys(partitionKey, partitionKeyDefinition); - } - - private static PartitionKeyHash GetHashFromObjectModel( - Cosmos.PartitionKey payload, - PartitionKeyDefinition partitionKeyDefinition) - { - IList partitionKeys = GetPartitionKeysFromObjectModel(payload); - return GetHashFromPartitionKeys(partitionKeys, partitionKeyDefinition); - } - - private static CosmosElement GetPartitionKeyFromPayload(CosmosObject payload, PartitionKeyDefinition partitionKeyDefinition) - { - // Restrict the partition key definition for now to keep things simple - if (partitionKeyDefinition.Kind != PartitionKind.Hash) - { - throw new ArgumentOutOfRangeException("Can only support hash partitioning"); - } - - if (partitionKeyDefinition.Version != Documents.PartitionKeyDefinitionVersion.V2) - { - throw new ArgumentOutOfRangeException("Can only support hash v2"); - } - - if 
(partitionKeyDefinition.Paths.Count != 1) - { - throw new ArgumentOutOfRangeException("Can only support a single partition key path."); - } - - IEnumerable tokens = partitionKeyDefinition.Paths[0].Split("/").Skip(1); - - CosmosElement partitionKey = payload; - foreach (string token in tokens) - { - if (partitionKey != default) - { - if (!payload.TryGetValue(token, out partitionKey)) - { - partitionKey = default; - } - } - } - - return partitionKey; - } - - private static IList GetPartitionKeysFromPayload(CosmosObject payload, PartitionKeyDefinition partitionKeyDefinition) - { - // Restrict the partition key definition for now to keep things simple - if (partitionKeyDefinition.Kind != PartitionKind.MultiHash && partitionKeyDefinition.Kind != PartitionKind.Hash) - { - throw new ArgumentOutOfRangeException("Can only support Hash/MultiHash partitioning"); - } - - if (partitionKeyDefinition.Version != Documents.PartitionKeyDefinitionVersion.V2) - { - throw new ArgumentOutOfRangeException("Can only support hash v2"); - } - - IList cosmosElements = new List(); - foreach (string partitionKeyPath in partitionKeyDefinition.Paths) - { - IEnumerable tokens = partitionKeyPath.Split("/").Skip(1); - CosmosElement partitionKey = payload; - foreach (string token in tokens) - { - if (partitionKey != default) - { - if (!payload.TryGetValue(token, out partitionKey)) - { - partitionKey = default; - } - } - } - cosmosElements.Add(partitionKey); - } - return cosmosElements; - } - - private static IList GetPartitionKeysFromObjectModel(Cosmos.PartitionKey payload) - { - CosmosArray partitionKeyPayload = CosmosArray.Parse(payload.ToJsonString()); - List cosmosElemementPayload = new List(); - foreach (CosmosElement element in partitionKeyPayload) - { - cosmosElemementPayload.Add(element); - } - return cosmosElemementPayload; - } - - private static PartitionKeyHash GetHashFromPartitionKeys(IList partitionKeys, PartitionKeyDefinition partitionKeyDefinition) - { - // Restrict the partition key definition for now to keep things simple - if (partitionKeyDefinition.Kind != PartitionKind.MultiHash && partitionKeyDefinition.Kind != PartitionKind.Hash) - { - throw new ArgumentOutOfRangeException("Can only support Hash/MultiHash partitioning"); - } - - if (partitionKeyDefinition.Version != Documents.PartitionKeyDefinitionVersion.V2) - { - throw new ArgumentOutOfRangeException("Can only support hash v2"); - } - - IList partitionKeyHashValues = new List(); - - foreach (CosmosElement partitionKey in partitionKeys) - { - if (partitionKey is CosmosArray cosmosArray) - { - foreach (CosmosElement element in cosmosArray) - { - PartitionKeyHash elementHash = element switch - { - null => PartitionKeyHash.V2.HashUndefined(), - CosmosString stringPartitionKey => PartitionKeyHash.V2.Hash(stringPartitionKey.Value), - CosmosNumber numberPartitionKey => PartitionKeyHash.V2.Hash(Number64.ToDouble(numberPartitionKey.Value)), - CosmosBoolean cosmosBoolean => PartitionKeyHash.V2.Hash(cosmosBoolean.Value), - CosmosNull _ => PartitionKeyHash.V2.HashNull(), - _ => throw new ArgumentOutOfRangeException(), - }; - partitionKeyHashValues.Add(elementHash.HashValues[0]); - } - continue; - } - - PartitionKeyHash partitionKeyHash = partitionKey switch - { - null => PartitionKeyHash.V2.HashUndefined(), - CosmosString stringPartitionKey => PartitionKeyHash.V2.Hash(stringPartitionKey.Value), - CosmosNumber numberPartitionKey => PartitionKeyHash.V2.Hash(Number64.ToDouble(numberPartitionKey.Value)), - CosmosBoolean cosmosBoolean => 
PartitionKeyHash.V2.Hash(cosmosBoolean.Value), - CosmosNull _ => PartitionKeyHash.V2.HashNull(), - _ => throw new ArgumentOutOfRangeException(), - }; - partitionKeyHashValues.Add(partitionKeyHash.HashValues[0]); - } - - return new PartitionKeyHash(partitionKeyHashValues.ToArray()); - } - - private static CosmosObject ConvertRecordToCosmosElement(Record record) - { - Dictionary keyValuePairs = new Dictionary - { - ["_rid"] = CosmosString.Create(record.ResourceIdentifier.ToString()), - ["_ts"] = CosmosNumber64.Create(record.Timestamp.Ticks), - ["id"] = CosmosString.Create(record.Identifier) - }; - - foreach (KeyValuePair property in record.Payload) - { - keyValuePairs[property.Key] = property.Value; - } - - return CosmosObject.Create(keyValuePairs); - } - - private static bool IsRecordWithinFeedRange( - Record record, - FeedRange feedRange, - PartitionKeyDefinition partitionKeyDefinition) - { - if (feedRange is FeedRangePartitionKey feedRangePartitionKey) - { - IList partitionKey = GetPartitionKeysFromObjectModel(feedRangePartitionKey.PartitionKey); - IList partitionKeyFromRecord = GetPartitionKeysFromPayload(record.Payload, partitionKeyDefinition); - if (partitionKeyDefinition.Kind == PartitionKind.MultiHash) - { - PartitionKeyHash partitionKeyHash = GetHashFromPartitionKeys(partitionKey, partitionKeyDefinition); - PartitionKeyHash partitionKeyFromRecordHash = GetHashFromPartitionKeys(partitionKeyFromRecord, partitionKeyDefinition); - - return partitionKeyHash.Equals(partitionKeyFromRecordHash) || partitionKeyFromRecordHash.Value.StartsWith(partitionKeyHash.Value); - } - return partitionKey.SequenceEqual(partitionKeyFromRecord); - } - else if (feedRange is FeedRangeEpk feedRangeEpk) - { - PartitionKeyHashRange hashRange = FeedRangeEpkToHashRange(feedRangeEpk); - PartitionKeyHash hash = GetHashFromPayload(record.Payload, partitionKeyDefinition); - return hashRange.Contains(hash); - } - else if (feedRange is FeedRangePartitionKeyRange) - { - return true; - } - else - { - throw new NotImplementedException(); - } - } - - private TryCatch MonadicGetPartitionKeyRangeIdFromFeedRange(FeedRange feedRange) - { - int partitionKeyRangeId; - if (feedRange is FeedRangeEpk feedRangeEpk) - { - // Check to see if any of the system ranges contain the user range. - List matchIds; - if (feedRangeEpk.Range.Min.Equals(FeedRangeEpk.FullRange.Range.Min) && feedRangeEpk.Range.Max.Equals(FeedRangeEpk.FullRange.Range.Max)) - { - matchIds = this.PartitionKeyRangeIds.ToList(); - } - else - { - PartitionKeyHashRange hashRange = FeedRangeEpkToHashRange(feedRangeEpk); - matchIds = this.partitionKeyRangeIdToHashRange - .Where(kvp => kvp.Value.Contains(hashRange)) - .Select(kvp => kvp.Key) - .ToList(); - } - - if (matchIds.Count != 1) - { - // Simulate a split exception, since we don't have a partition key range id to route to. 
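The simulated exception above is what drives the split-handling paths these tests exercise: a 410 (Gone) with the PartitionKeyRangeGone sub-status signals the enumerator to refresh its feed ranges and continue against the child ranges instead of surfacing the error. A hypothetical caller-side check, assuming only the public CosmosException surface:

    using System.Net;
    using Microsoft.Azure.Cosmos;

    static class GoneHandlingSketch
    {
        // True when the failure indicates the partition key range was split or merged
        // (sub-status 1002, SubStatusCodes.PartitionKeyRangeGone), i.e. the caller
        // should refresh routing information and retry rather than fail.
        public static bool IsPartitionKeyRangeGone(CosmosException exception)
        {
            return exception.StatusCode == HttpStatusCode.Gone
                && exception.SubStatusCode == 1002;
        }
    }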
- CosmosException goneException = new CosmosException( - message: $"Epk Range: {feedRangeEpk.Range} is gone.", - statusCode: System.Net.HttpStatusCode.Gone, - subStatusCode: (int)SubStatusCodes.PartitionKeyRangeGone, - activityId: Guid.NewGuid().ToString(), - requestCharge: default); - - return TryCatch.FromException(goneException); - } - - partitionKeyRangeId = matchIds[0]; - } - else if (feedRange is FeedRangePartitionKeyRange feedRangePartitionKeyRange) - { - partitionKeyRangeId = int.Parse(feedRangePartitionKeyRange.PartitionKeyRangeId); - } - else if (feedRange is FeedRangePartitionKey feedRangePartitionKey) - { - PartitionKeyHash partitionKeyHash = GetHashFromObjectModel(feedRangePartitionKey.PartitionKey, this.partitionKeyDefinition); - - int? foundValue = null; - foreach (KeyValuePair kvp in this.partitionKeyRangeIdToHashRange) - { - if (kvp.Value.Contains(partitionKeyHash)) - { - foundValue = kvp.Key; - } - } - - if (!foundValue.HasValue) - { - throw new InvalidOperationException("Failed to find value"); - } - - partitionKeyRangeId = foundValue.Value; - } - else - { - throw new NotImplementedException("Unknown feed range type"); - } - - return TryCatch.FromResult(partitionKeyRangeId); - } - - private static PartitionKeyHashRange FeedRangeEpkToHashRange(FeedRangeEpk feedRangeEpk) - { - PartitionKeyHash? start = feedRangeEpk.Range.Min == string.Empty ? (PartitionKeyHash?)null : PartitionKeyHash.Parse(feedRangeEpk.Range.Min); - PartitionKeyHash? end = feedRangeEpk.Range.Max == string.Empty || feedRangeEpk.Range.Max == "FF" ? (PartitionKeyHash?)null : PartitionKeyHash.Parse(feedRangeEpk.Range.Max); - PartitionKeyHashRange hashRange = new PartitionKeyHashRange(start, end); - return hashRange; - } - - private static FeedRangeEpk HashRangeToFeedRangeEpk(PartitionKeyHashRange hashRange) - { - return new FeedRangeEpk( - new Documents.Routing.Range( - min: hashRange.StartInclusive.HasValue ? hashRange.StartInclusive.ToString() : string.Empty, - max: hashRange.EndExclusive.HasValue ? 
hashRange.EndExclusive.ToString() : string.Empty, - isMinInclusive: true, - isMaxInclusive: false)); - } - - private PartitionKeyHash ComputeMedianSplitPointAmongDocumentsInPKRange(PartitionKeyHashRange hashRange) - { - if (!this.partitionedRecords.TryGetValue(hashRange, out Records parentRecords)) - { - throw new InvalidOperationException("failed to find the range."); - } - - List partitionKeyHashes = new List(); - foreach (Record record in parentRecords) - { - PartitionKeyHash partitionKeyHash = GetHashFromPayload(record.Payload, this.partitionKeyDefinition); - partitionKeyHashes.Add(partitionKeyHash); - } - - partitionKeyHashes.Sort(); - PartitionKeyHash medianPkHash = partitionKeyHashes[partitionKeyHashes.Count / 2]; - - // For MultiHash Collection, split at top level to ensure documents for top level key exist across partitions - // after split - if (medianPkHash.HashValues.Count > 1) - { - return new PartitionKeyHash(medianPkHash.HashValues[0]); - } - - return medianPkHash; - } - public Task> MonadicGetResourceIdentifierAsync(ITrace trace, CancellationToken cancellationToken) - { - return Task.FromResult(TryCatch.FromResult("AYIMAMmFOw8YAAAAAAAAAA==")); - } - - private sealed class Records : IReadOnlyList - { - private readonly List storage; - - public Records() - { - this.storage = new List(); - } - - public Record this[int index] => this.storage[index]; - - public int Count => this.storage.Count; - - public IEnumerator GetEnumerator() => this.storage.GetEnumerator(); - - IEnumerator IEnumerable.GetEnumerator() => this.storage.GetEnumerator(); - - public Record Add(int pkrangeid, CosmosObject payload) - { - // using pkrangeid for database since resource id doesnt serialize both document and pkrangeid. - ResourceId currentResourceId; - if (this.Count == 0) - { - currentResourceId = ResourceId.Parse("AYIMAMmFOw8YAAAAAAAAAA=="); - - PropertyInfo documentProp = currentResourceId - .GetType() - .GetProperty("Document", BindingFlags.NonPublic | BindingFlags.Public | BindingFlags.Instance); - documentProp.SetValue(currentResourceId, (ulong)1); - - PropertyInfo databaseProp = currentResourceId - .GetType() - .GetProperty("Database", BindingFlags.NonPublic | BindingFlags.Public | BindingFlags.Instance); - databaseProp.SetValue(currentResourceId, (uint)pkrangeid + 1); - } - else - { - currentResourceId = this.storage[this.storage.Count - 1].ResourceIdentifier; - } - - ResourceId nextResourceId = ResourceId.Parse("AYIMAMmFOw8YAAAAAAAAAA=="); - { - PropertyInfo documentProp = nextResourceId - .GetType() - .GetProperty("Document", BindingFlags.NonPublic | BindingFlags.Public | BindingFlags.Instance); - documentProp.SetValue(nextResourceId, (ulong)(currentResourceId.Document + 1)); - - PropertyInfo databaseProp = nextResourceId - .GetType() - .GetProperty("Database", BindingFlags.NonPublic | BindingFlags.Public | BindingFlags.Instance); - databaseProp.SetValue(nextResourceId, (uint)pkrangeid + 1); - } - - Record record = new Record(nextResourceId, DateTime.UtcNow, Guid.NewGuid().ToString(), payload); - this.storage.Add(record); - return record; - } - - public Record Add(Record record) - { - this.storage.Add(record); - return record; - } - } - - private readonly struct Change - { - public Change(Record record, ulong partitionKeyRangeId, ulong logicalSequenceNumber) - { - this.Record = record ?? 
throw new ArgumentNullException(nameof(record)); - this.PartitionKeyRangeId = partitionKeyRangeId; - this.LogicalSequenceNumber = logicalSequenceNumber; - } - - public Record Record { get; } - public ulong PartitionKeyRangeId { get; } - public ulong LogicalSequenceNumber { get; } - } - - private sealed class ChangeFeedPredicate : IChangeFeedStateVisitor - { - public static readonly ChangeFeedPredicate Singleton = new ChangeFeedPredicate(); - - private ChangeFeedPredicate() - { - } - - public bool Visit(ChangeFeedStateBeginning changeFeedStateBeginning, Change input) => true; - - public bool Visit(ChangeFeedStateTime changeFeedStateTime, Change input) => input.Record.Timestamp >= changeFeedStateTime.StartTime; - - public bool Visit(ChangeFeedStateContinuation changeFeedStateContinuation, Change input) - { - CosmosObject continuation = (CosmosObject)changeFeedStateContinuation.ContinuationToken; - - if (!continuation.TryGetValue("PkRangeId", out CosmosNumber pkRangeIdCosmosElement)) - { - throw new InvalidOperationException("failed to get pkrange id"); - } - - ulong pkRangeId = (ulong)Number64.ToLong(pkRangeIdCosmosElement.Value); - - if (!continuation.TryGetValue("LSN", out CosmosNumber lsnCosmosElement)) - { - throw new InvalidOperationException("failed to get lsn"); - } - - ulong lsn = (ulong)Number64.ToLong(lsnCosmosElement.Value); - - int pkRangeIdCompare = input.PartitionKeyRangeId.CompareTo(pkRangeId); - if (pkRangeIdCompare < 0) - { - return false; - } - else if (pkRangeIdCompare > 0) - { - return true; - } - else - { - return input.LogicalSequenceNumber > lsn; - } - } - - public bool Visit(ChangeFeedStateNow changeFeedStateNow, Change input) - { - DateTime now = DateTime.UtcNow; - ChangeFeedStateTime startTime = new ChangeFeedStateTime(now); - return this.Visit(startTime, input); - } - } - - private sealed class CosmosElementToSqlScalarExpressionVisitor : ICosmosElementVisitor - { - public static readonly CosmosElementToSqlScalarExpressionVisitor Singleton = new CosmosElementToSqlScalarExpressionVisitor(); - - private CosmosElementToSqlScalarExpressionVisitor() - { - // Private constructor, since this class is a singleton. - } - - public SqlScalarExpression Visit(CosmosArray cosmosArray) - { - List items = new List(); - foreach (CosmosElement item in cosmosArray) - { - items.Add(item.Accept(this)); - } - - return SqlArrayCreateScalarExpression.Create(items.ToImmutableArray()); - } - - public SqlScalarExpression Visit(CosmosBinary cosmosBinary) - { - // Can not convert binary to scalar expression without knowing the API type. - throw new NotImplementedException(); - } - - public SqlScalarExpression Visit(CosmosBoolean cosmosBoolean) - { - return SqlLiteralScalarExpression.Create(SqlBooleanLiteral.Create(cosmosBoolean.Value)); - } - - public SqlScalarExpression Visit(CosmosGuid cosmosGuid) - { - // Can not convert guid to scalar expression without knowing the API type. 
- throw new NotImplementedException(); - } - - public SqlScalarExpression Visit(CosmosNull cosmosNull) - { - return SqlLiteralScalarExpression.Create(SqlNullLiteral.Create()); - } - - public SqlScalarExpression Visit(CosmosNumber cosmosNumber) - { - if (!(cosmosNumber is CosmosNumber64 cosmosNumber64)) - { - throw new ArgumentException($"Unknown {nameof(CosmosNumber)} type: {cosmosNumber.GetType()}."); - } - - return SqlLiteralScalarExpression.Create(SqlNumberLiteral.Create(cosmosNumber64.GetValue())); - } - - public SqlScalarExpression Visit(CosmosObject cosmosObject) - { - List properties = new List(); - foreach (KeyValuePair prop in cosmosObject) - { - SqlPropertyName name = SqlPropertyName.Create(prop.Key); - CosmosElement value = prop.Value; - SqlScalarExpression expression = value.Accept(this); - SqlObjectProperty property = SqlObjectProperty.Create(name, expression); - properties.Add(property); - } - - return SqlObjectCreateScalarExpression.Create(properties.ToImmutableArray()); - } - - public SqlScalarExpression Visit(CosmosString cosmosString) - { - return SqlLiteralScalarExpression.Create(SqlStringLiteral.Create(cosmosString.Value)); - } - - public SqlScalarExpression Visit(CosmosUndefined cosmosUndefined) - { - return SqlLiteralScalarExpression.Create(SqlUndefinedLiteral.Create()); - } - } - } +//------------------------------------------------------------ +// Copyright (c) Microsoft Corporation. All rights reserved. +//------------------------------------------------------------ + +namespace Microsoft.Azure.Cosmos.Tests.Pagination +{ + using System; + using System.Collections; + using System.Collections.Generic; + using System.Collections.Immutable; + using System.IO; + using System.Linq; + using System.Reflection; + using System.Threading; + using System.Threading.Tasks; + using Microsoft.Azure.Cosmos; + using Microsoft.Azure.Cosmos.ChangeFeed.Pagination; + using Microsoft.Azure.Cosmos.CosmosElements; + using Microsoft.Azure.Cosmos.CosmosElements.Numbers; + using Microsoft.Azure.Cosmos.Json; + using Microsoft.Azure.Cosmos.Pagination; + using Microsoft.Azure.Cosmos.Query.Core; + using Microsoft.Azure.Cosmos.Query.Core.Monads; + using Microsoft.Azure.Cosmos.Query.Core.Parser; + using Microsoft.Azure.Cosmos.Query.Core.Pipeline.CrossPartition.OrderBy; + using Microsoft.Azure.Cosmos.Query.Core.Pipeline.Distinct; + using Microsoft.Azure.Cosmos.Query.Core.Pipeline.Pagination; + using Microsoft.Azure.Cosmos.ReadFeed.Pagination; + using Microsoft.Azure.Cosmos.Routing; + using Microsoft.Azure.Cosmos.Serialization.HybridRow; + using Microsoft.Azure.Cosmos.SqlObjects; + using Microsoft.Azure.Cosmos.Tests.Query.OfflineEngine; + using Microsoft.Azure.Cosmos.Tracing; + using Microsoft.Azure.Documents; + using static Microsoft.Azure.Cosmos.Query.Core.SqlQueryResumeFilter; + using ResourceIdentifier = Cosmos.Pagination.ResourceIdentifier; + using UInt128 = UInt128; + + // Collection useful for mocking requests and repartitioning (splits / merge). 
+ internal class InMemoryContainer : IMonadicDocumentContainer + { + private readonly PartitionKeyDefinition partitionKeyDefinition; + private readonly Dictionary parentToChildMapping; + + private PartitionKeyHashRangeDictionary partitionedRecords; + private PartitionKeyHashRangeDictionary> partitionedChanges; + private Dictionary partitionKeyRangeIdToHashRange; + private Dictionary cachedPartitionKeyRangeIdToHashRange; + + public InMemoryContainer( + PartitionKeyDefinition partitionKeyDefinition) + { + this.partitionKeyDefinition = partitionKeyDefinition ?? throw new ArgumentNullException(nameof(partitionKeyDefinition)); + PartitionKeyHashRange fullRange = new PartitionKeyHashRange(startInclusive: null, endExclusive: new PartitionKeyHash(Cosmos.UInt128.MaxValue)); + PartitionKeyHashRanges partitionKeyHashRanges = PartitionKeyHashRanges.Create(new PartitionKeyHashRange[] { fullRange }); + this.partitionedRecords = new PartitionKeyHashRangeDictionary(partitionKeyHashRanges); + this.partitionedRecords[fullRange] = new Records(); + this.partitionedChanges = new PartitionKeyHashRangeDictionary>(partitionKeyHashRanges); + this.partitionedChanges[fullRange] = new List(); + this.partitionKeyRangeIdToHashRange = new Dictionary() + { + { 0, fullRange } + }; + this.cachedPartitionKeyRangeIdToHashRange = new Dictionary() + { + { 0, fullRange } + }; + this.parentToChildMapping = new Dictionary(); + } + + public Task>> MonadicGetFeedRangesAsync( + ITrace trace, + CancellationToken cancellationToken) => this.MonadicGetChildRangeAsync( + FeedRangeEpk.FullRange, + trace, + cancellationToken); + + public async Task>> MonadicGetChildRangeAsync( + FeedRangeInternal feedRange, + ITrace trace, + CancellationToken cancellationToken) + { + cancellationToken.ThrowIfCancellationRequested(); + + if (feedRange == null) + { + throw new ArgumentNullException(nameof(feedRange)); + } + + if (trace == null) + { + throw new ArgumentNullException(nameof(trace)); + } + + using (ITrace getChildRangesTrace = trace.StartChild(name: "Get Child Ranges", TraceComponent.Routing, TraceLevel.Info)) + { + FeedRangeEpk CreateRangeFromId(int id) + { + PartitionKeyHashRange hashRange = this.cachedPartitionKeyRangeIdToHashRange[id]; + return new FeedRangeEpk( + new Documents.Routing.Range( + min: hashRange.StartInclusive.HasValue ? hashRange.StartInclusive.Value.ToString() : string.Empty, + max: hashRange.EndExclusive.HasValue ? hashRange.EndExclusive.Value.ToString() : string.Empty, + isMinInclusive: true, + isMaxInclusive: false)); + } + + if (feedRange is FeedRangePartitionKey) + { + throw new ArgumentException("Can not get the child of a logical partition key"); + } + + if (feedRange.Equals(FeedRangeEpk.FullRange)) + { + List ranges = new List(); + foreach (int id in this.cachedPartitionKeyRangeIdToHashRange.Keys) + { + ranges.Add(CreateRangeFromId(id)); + } + + return TryCatch>.FromResult(ranges); + } + + if (feedRange is FeedRangeEpk feedRangeEpk) + { + // look for overlapping epk ranges. 
+ List overlappingRanges; + if (feedRangeEpk.Range.Min.Equals(FeedRangeEpk.FullRange.Range.Min) && feedRangeEpk.Range.Max.Equals(FeedRangeEpk.FullRange.Range.Max)) + { + overlappingRanges = this.cachedPartitionKeyRangeIdToHashRange.Select(kvp => CreateRangeFromId(kvp.Key)).ToList(); + } + else + { + overlappingRanges = new List(); + PartitionKeyHashRange userRange = FeedRangeEpkToHashRange(feedRangeEpk); + foreach (PartitionKeyHashRange systemRange in this.cachedPartitionKeyRangeIdToHashRange.Values) + { + if (userRange.TryGetOverlappingRange(systemRange, out PartitionKeyHashRange overlappingRange)) + { + overlappingRanges.Add(HashRangeToFeedRangeEpk(overlappingRange)); + } + } + } + + if (overlappingRanges.Count == 0) + { + return TryCatch>.FromException( + new KeyNotFoundException( + $"PartitionKeyRangeId: {feedRangeEpk} does not exist.")); + } + + return TryCatch>.FromResult(overlappingRanges); + } + + if (!(feedRange is FeedRangePartitionKeyRange feedRangePartitionKeyRange)) + { + throw new InvalidOperationException("Expected feed range to be a partition key range at this point."); + } + + if (!int.TryParse(feedRangePartitionKeyRange.PartitionKeyRangeId, out int partitionKeyRangeId)) + { + return TryCatch>.FromException( + new FormatException( + $"PartitionKeyRangeId: {feedRangePartitionKeyRange.PartitionKeyRangeId} is not an integer.")); + } + + if (!this.parentToChildMapping.TryGetValue(partitionKeyRangeId, out (int left, int right) children)) + { + // This range has no children (base case) + if (!this.cachedPartitionKeyRangeIdToHashRange.TryGetValue(partitionKeyRangeId, out PartitionKeyHashRange hashRange)) + { + return TryCatch>.FromException( + new KeyNotFoundException( + $"PartitionKeyRangeId: {partitionKeyRangeId} does not exist.")); + } + + List singleRange = new List() + { + CreateRangeFromId(partitionKeyRangeId), + }; + + return TryCatch>.FromResult(singleRange); + } + + // Recurse on the left and right child. 
+ FeedRangeInternal left = new FeedRangePartitionKeyRange(children.left.ToString()); + FeedRangeInternal right = new FeedRangePartitionKeyRange(children.right.ToString()); + + TryCatch> tryGetLeftRanges = await this.MonadicGetChildRangeAsync(left, trace, cancellationToken); + if (tryGetLeftRanges.Failed) + { + return tryGetLeftRanges; + } + + TryCatch> tryGetRightRanges = await this.MonadicGetChildRangeAsync(right, trace, cancellationToken); + if (tryGetRightRanges.Failed) + { + return tryGetRightRanges; + } + + List recursiveOverlappingRanges = tryGetLeftRanges.Result.Concat(tryGetRightRanges.Result).ToList(); + return TryCatch>.FromResult(recursiveOverlappingRanges); + } + } + + public Task MonadicRefreshProviderAsync( + ITrace trace, + CancellationToken cancellationToken) + { + using (ITrace refreshProviderTrace = trace.StartChild("Refreshing FeedRangeProvider", TraceComponent.Routing, TraceLevel.Info)) + { + this.cachedPartitionKeyRangeIdToHashRange = new Dictionary(this.partitionKeyRangeIdToHashRange); + return Task.FromResult(TryCatch.FromResult()); + } + } + + public Task> MonadicCreateItemAsync( + CosmosObject payload, + CancellationToken cancellationToken) + { + cancellationToken.ThrowIfCancellationRequested(); + + if (payload == null) + { + throw new ArgumentNullException(nameof(payload)); + } + + PartitionKeyHash partitionKeyHash = GetHashFromPayload(payload, this.partitionKeyDefinition); + if (!this.partitionedRecords.TryGetValue(partitionKeyHash, out Records records)) + { + records = new Records(); + this.partitionedRecords[partitionKeyHash] = records; + } + + int? pkrangeid = null; + foreach (KeyValuePair kvp in this.partitionKeyRangeIdToHashRange) + { + if (kvp.Value.Contains(partitionKeyHash)) + { + pkrangeid = kvp.Key; + } + } + + if (!pkrangeid.HasValue) + { + throw new InvalidOperationException(); + } + + Record recordAdded = records.Add(pkrangeid.Value, payload); + + if (!this.partitionedChanges.TryGetValue(partitionKeyHash, out List changes)) + { + changes = new List(); + this.partitionedChanges[partitionKeyHash] = changes; + } + + ulong maxLogicalSequenceNumber = changes.Count == 0 ? 0 : changes.Select(change => change.LogicalSequenceNumber).Max(); + + Change change = new Change( + recordAdded, + partitionKeyRangeId: (ulong)pkrangeid.Value, + logicalSequenceNumber: maxLogicalSequenceNumber + 1); + + changes.Add(change); + return Task.FromResult(TryCatch.FromResult(recordAdded)); + } + + public Task> MonadicReadItemAsync( + CosmosElement partitionKey, + string identifier, + CancellationToken cancellationToken) + { + cancellationToken.ThrowIfCancellationRequested(); + + static Task> CreateNotFoundException(CosmosElement partitionKey, string identifer) + { + return Task.FromResult( + TryCatch.FromException( + new CosmosException( + message: $"Document with partitionKey: {partitionKey?.ToString() ?? 
"UNDEFINED"} and id: {identifer} not found.", + statusCode: System.Net.HttpStatusCode.NotFound, + subStatusCode: default, + activityId: Guid.NewGuid().ToString(), + requestCharge: 42))); + } + + PartitionKeyHash partitionKeyHash = GetHashFromPartitionKeys( + new List { partitionKey }, + this.partitionKeyDefinition); + + if (!this.partitionedRecords.TryGetValue(partitionKeyHash, out Records records)) + { + return CreateNotFoundException(partitionKey, identifier); + } + + foreach (Record candidate in records) + { + bool identifierMatches = candidate.Identifier == identifier; + + CosmosElement candidatePartitionKey = GetPartitionKeyFromPayload( + candidate.Payload, + this.partitionKeyDefinition); + + bool partitionKeyMatches; + if (candidatePartitionKey is null && partitionKey is null) + { + partitionKeyMatches = true; + } + else if ((candidatePartitionKey != null) && (partitionKey != null)) + { + partitionKeyMatches = candidatePartitionKey.Equals(partitionKey); + } + else + { + partitionKeyMatches = false; + } + + if (identifierMatches && partitionKeyMatches) + { + return Task.FromResult(TryCatch.FromResult(candidate)); + } + } + + return CreateNotFoundException(partitionKey, identifier); + } + + public Task> MonadicReadFeedAsync( + FeedRangeState feedRangeState, + ReadFeedPaginationOptions readFeedPaginationOptions, + ITrace trace, + CancellationToken cancellationToken) + { + cancellationToken.ThrowIfCancellationRequested(); + + readFeedPaginationOptions ??= ReadFeedPaginationOptions.Default; + + using (ITrace readFeed = trace.StartChild("Read Feed Transport", TraceComponent.Transport, TraceLevel.Info)) + { + TryCatch monadicPartitionKeyRangeId = this.MonadicGetPartitionKeyRangeIdFromFeedRange(feedRangeState.FeedRange); + if (monadicPartitionKeyRangeId.Failed) + { + return Task.FromResult(TryCatch.FromException(monadicPartitionKeyRangeId.Exception)); + } + + int partitionKeyRangeId = monadicPartitionKeyRangeId.Result; + + if (!this.partitionKeyRangeIdToHashRange.TryGetValue( + partitionKeyRangeId, + out PartitionKeyHashRange range)) + { + return Task.FromResult( + TryCatch.FromException( + new CosmosException( + message: $"PartitionKeyRangeId {partitionKeyRangeId} is gone", + statusCode: System.Net.HttpStatusCode.Gone, + subStatusCode: (int)SubStatusCodes.PartitionKeyRangeGone, + activityId: Guid.NewGuid().ToString(), + requestCharge: 42))); + } + + if (!this.partitionedRecords.TryGetValue(range, out Records records)) + { + throw new InvalidOperationException("failed to find the range."); + } + + (ulong pkrangeId, ulong documentIndex) rangeIdAndIndex; + if (feedRangeState.State is ReadFeedBeginningState) + { + rangeIdAndIndex = (0, 0); + } + else if (feedRangeState.State is ReadFeedContinuationState readFeedContinuationState) + { + ResourceIdentifier resourceIdentifier = ResourceIdentifier.Parse(((CosmosString)readFeedContinuationState.ContinuationToken).Value); + rangeIdAndIndex = (resourceIdentifier.Database, resourceIdentifier.Document); + } + else + { + throw new InvalidOperationException("Unknown read feed state"); + } + + List page = records + .Where((record) => + { + if (!IsRecordWithinFeedRange(record, feedRangeState.FeedRange, this.partitionKeyDefinition)) + { + return false; + } + + // We do a filter on a composite index here + int pkRangeIdCompare = record.ResourceIdentifier.Database.CompareTo((uint)rangeIdAndIndex.pkrangeId); + if (pkRangeIdCompare < 0) + { + return false; + } + else if (pkRangeIdCompare > 0) + { + return true; + } + else // pkRangeIdCompare == 0 + { + return 
record.ResourceIdentifier.Document > rangeIdAndIndex.documentIndex; + } + }) + .Take(readFeedPaginationOptions.PageSizeLimit.GetValueOrDefault(int.MaxValue)) + .ToList(); + + List documents = new List(); + foreach (Record record in page) + { + CosmosObject document = ConvertRecordToCosmosElement(record); + documents.Add(CosmosObject.Create(document)); + } + + ReadFeedState continuationState; + if (documents.Count == 0) + { + continuationState = null; + } + else + { + ResourceId resourceIdentifier = page.Last().ResourceIdentifier; + CosmosString continuationToken = CosmosString.Create(resourceIdentifier.ToString()); + continuationState = ReadFeedState.Continuation(continuationToken); + } + + CosmosArray cosmosDocuments = CosmosArray.Create(documents); + CosmosNumber cosmosCount = CosmosNumber64.Create(cosmosDocuments.Count); + CosmosString cosmosRid = CosmosString.Create("AYIMAMmFOw8YAAAAAAAAAA=="); + + Dictionary responseDictionary = new Dictionary() + { + { "Documents", cosmosDocuments }, + { "_count", cosmosCount }, + { "_rid", cosmosRid }, + }; + CosmosObject cosmosResponse = CosmosObject.Create(responseDictionary); + IJsonWriter jsonWriter = Cosmos.Json.JsonWriter.Create(JsonSerializationFormat.Text); + cosmosResponse.WriteTo(jsonWriter); + byte[] result = jsonWriter.GetResult().ToArray(); + MemoryStream responseStream = new MemoryStream(result); + + ReadFeedPage readFeedPage = new ReadFeedPage( + responseStream, + requestCharge: 42, + itemCount: cosmosDocuments.Count, + activityId: Guid.NewGuid().ToString(), + additionalHeaders: new Dictionary() + { + { "test-header", "test-value" } + }, + continuationState); + + return Task.FromResult(TryCatch.FromResult(readFeedPage)); + } + } + + public virtual Task> MonadicQueryAsync( + SqlQuerySpec sqlQuerySpec, + FeedRangeState feedRangeState, + QueryPaginationOptions queryPaginationOptions, + ITrace trace, + CancellationToken cancellationToken) + { + if (cancellationToken.IsCancellationRequested) + { + return Task.FromCanceled>(cancellationToken); + } + if (sqlQuerySpec == null) + { + throw new ArgumentNullException(nameof(sqlQuerySpec)); + } + + using (ITrace childTrace = trace.StartChild("Query Transport", TraceComponent.Transport, TraceLevel.Info)) + { + TryCatch monadicPartitionKeyRangeId = this.MonadicGetPartitionKeyRangeIdFromFeedRange(feedRangeState.FeedRange); + if (monadicPartitionKeyRangeId.Failed) + { + return Task.FromResult(TryCatch.FromException(monadicPartitionKeyRangeId.Exception)); + } + + int partitionKeyRangeId = monadicPartitionKeyRangeId.Result; + + if (!this.partitionKeyRangeIdToHashRange.TryGetValue( + partitionKeyRangeId, + out PartitionKeyHashRange range)) + { + return Task.FromResult(TryCatch.FromException( + new CosmosException( + message: $"PartitionKeyRangeId {partitionKeyRangeId} is gone", + statusCode: System.Net.HttpStatusCode.Gone, + subStatusCode: (int)SubStatusCodes.PartitionKeyRangeGone, + activityId: Guid.NewGuid().ToString(), + requestCharge: 42))); + } + + if (!this.partitionedRecords.TryGetValue(range, out Records records)) + { + throw new InvalidOperationException("failed to find the range."); + } + + List documents = new List(); + foreach (Record record in records.Where(r => IsRecordWithinFeedRange(r, feedRangeState.FeedRange, this.partitionKeyDefinition))) + { + CosmosObject document = ConvertRecordToCosmosElement(record); + documents.Add(CosmosObject.Create(document)); + } + + TryCatch monadicParse = SqlQueryParser.Monadic.Parse(sqlQuerySpec.QueryText); + if (monadicParse.Failed) + { + return 
Task.FromResult(TryCatch.FromException(monadicParse.Exception)); + } + + SqlQuery sqlQuery = monadicParse.Result; + if ((sqlQuery.OrderByClause != null) && (feedRangeState.State != null) && (sqlQuerySpec.ResumeFilter == null)) + { + // This is a hack. + // If the query is an ORDER BY query then we need to seek to the resume term. + // Since I don't want to port over the proper logic from the backend I will just inject a filter. + // For now I am only handling the single order by item case + if (sqlQuery.OrderByClause.OrderByItems.Length != 1) + { + throw new NotImplementedException("Can only support a single order by column"); + } + + SqlOrderByItem orderByItem = sqlQuery.OrderByClause.OrderByItems[0]; + CosmosObject parsedContinuationToken = CosmosObject.Parse(((CosmosString)feedRangeState.State.Value).Value); + SqlBinaryScalarExpression resumeFilter = SqlBinaryScalarExpression.Create( + orderByItem.IsDescending ? SqlBinaryScalarOperatorKind.LessThan : SqlBinaryScalarOperatorKind.GreaterThan, + orderByItem.Expression, + parsedContinuationToken["orderByItem"].Accept(CosmosElementToSqlScalarExpressionVisitor.Singleton)); + + SqlWhereClause modifiedWhereClause = sqlQuery.WhereClause.FilterExpression == null + ? SqlWhereClause.Create(resumeFilter) + : SqlWhereClause.Create( + SqlBinaryScalarExpression.Create( + SqlBinaryScalarOperatorKind.And, + sqlQuery.WhereClause.FilterExpression, + resumeFilter)); + + sqlQuery = SqlQuery.Create( + sqlQuery.SelectClause, + sqlQuery.FromClause, + modifiedWhereClause, + sqlQuery.GroupByClause, + sqlQuery.OrderByClause, + sqlQuery.OffsetLimitClause); + + // We still need to handle duplicate values and break the tie with the rid + // But since all the values are unique for our testing purposes we can ignore this for now. + } + IEnumerable queryResults = SqlInterpreter.ExecuteQuery(documents, sqlQuery); + IEnumerable queryPageResults = queryResults; + + // If the resume value is passed in query spec, filter out the results that has order by item value smaller than resume values + if (sqlQuerySpec.ResumeFilter != null) + { + if (sqlQuery.OrderByClause.OrderByItems.Length != 1) + { + throw new NotImplementedException("Can only support a single order by column"); + } + + SqlOrderByItem orderByItem = sqlQuery.OrderByClause.OrderByItems[0]; + IEnumerator queryResultEnumerator = queryPageResults.GetEnumerator(); + + int skipCount = 0; + while(queryResultEnumerator.MoveNext()) + { + CosmosObject document = (CosmosObject)queryResultEnumerator.Current; + CosmosElement orderByValue = ((CosmosObject)((CosmosArray)document["orderByItems"])[0])["item"]; + + int sortOrderCompare = sqlQuerySpec.ResumeFilter.ResumeValues[0].CompareTo(orderByValue); + + if (sortOrderCompare != 0) + { + sortOrderCompare = orderByItem.IsDescending ? -sortOrderCompare : sortOrderCompare; + } + + if (sortOrderCompare < 0) + { + // We might have passed the item due to deletions and filters. + break; + } + + if (sortOrderCompare >= 0) + { + // This document does not match the sort order, so skip it. + skipCount++; + } + } + + queryPageResults = queryPageResults.Skip(skipCount); + + // NOTE: We still need to handle duplicate values and break the tie with the rid + // But since all the values are unique for our testing purposes we can ignore this for now. 
+ } + + // Filter for the continuation token + string continuationResourceId; + int continuationSkipCount; + + if ((sqlQuery.OrderByClause == null) && (feedRangeState.State != null)) + { + CosmosObject parsedContinuationToken = CosmosObject.Parse(((CosmosString)feedRangeState.State.Value).Value); + continuationResourceId = ((CosmosString)parsedContinuationToken["resourceId"]).Value; + continuationSkipCount = (int)Number64.ToLong(((CosmosNumber64)parsedContinuationToken["skipCount"]).Value); + + ResourceIdentifier continuationParsedResourceId = ResourceIdentifier.Parse(continuationResourceId); + queryPageResults = queryPageResults.Where(c => + { + ResourceId documentResourceId = ResourceId.Parse(((CosmosString)((CosmosObject)c)["_rid"]).Value); + // Perform a composite filter on pkrange id and document index + int pkRangeIdCompare = documentResourceId.Database.CompareTo(continuationParsedResourceId.Database); + if (pkRangeIdCompare < 0) + { + return false; + } + else if (pkRangeIdCompare > 0) + { + return true; + } + else // pkRangeIdCompare == 0 + { + int documentCompare = documentResourceId.Document.CompareTo(continuationParsedResourceId.Document); + + // If we have a skip count, then we can't skip over the rid we last saw, since + // there are documents with the same rid that we need to skip over. + return continuationSkipCount == 0 ? documentCompare > 0 : documentCompare >= 0; + } + }); + + for (int i = 0; i < continuationSkipCount; i++) + { + if (queryPageResults.FirstOrDefault() is CosmosObject firstDocument) + { + string currentResourceId = ((CosmosString)firstDocument["_rid"]).Value; + if (currentResourceId == continuationResourceId) + { + queryPageResults = queryPageResults.Skip(1); + } + } + } + } + else + { + continuationResourceId = null; + continuationSkipCount = 0; + } + + queryPageResults = queryPageResults.Take((queryPaginationOptions ?? 
QueryPaginationOptions.Default).PageSizeLimit.GetValueOrDefault(int.MaxValue)); + List queryPageResultList = queryPageResults.ToList(); + QueryState queryState; + if (queryPageResultList.LastOrDefault() is CosmosObject lastDocument + && lastDocument.TryGetValue("_rid", out CosmosString resourceId)) + { + string currentResourceId = resourceId.Value; + int currentSkipCount = queryPageResultList + .Where(document => ((CosmosString)((CosmosObject)document)["_rid"]).Value == currentResourceId) + .Count(); + if (currentResourceId == continuationResourceId) + { + currentSkipCount += continuationSkipCount; + } + + Dictionary queryStateDictionary = new Dictionary() + { + { "resourceId", CosmosString.Create(currentResourceId) }, + { "skipCount", CosmosNumber64.Create(currentSkipCount) }, + }; + + if (sqlQuery.OrderByClause != null) + { + SqlOrderByItem orderByItem = sqlQuery.OrderByClause.OrderByItems[0]; + string propertyName = ((SqlPropertyRefScalarExpression)orderByItem.Expression).Identifier.Value; + queryStateDictionary["orderByItem"] = ((CosmosObject)lastDocument["payload"])[propertyName]; + } + + CosmosObject queryStateValue = CosmosObject.Create(queryStateDictionary); + + queryState = new QueryState(CosmosString.Create(queryStateValue.ToString())); + } + else + { + queryState = default; + } + + ImmutableDictionary.Builder additionalHeaders = ImmutableDictionary.CreateBuilder(); + additionalHeaders.Add("x-ms-documentdb-partitionkeyrangeid", "0"); + additionalHeaders.Add("x-ms-test-header", "true"); + + return Task.FromResult( + TryCatch.FromResult( + new QueryPage( + queryPageResultList, + requestCharge: 42, + activityId: Guid.NewGuid().ToString(), + cosmosQueryExecutionInfo: default, + distributionPlanSpec: default, + disallowContinuationTokenMessage: default, + additionalHeaders: additionalHeaders.ToImmutable(), + state: queryState, + streaming: default))); + } + } + + public Task> MonadicChangeFeedAsync( + FeedRangeState feedRangeState, + ChangeFeedPaginationOptions changeFeedPaginationOptions, + ITrace trace, + CancellationToken cancellationToken) + { + cancellationToken.ThrowIfCancellationRequested(); + + using (ITrace childTrace = trace.StartChild("Change Feed Transport", TraceComponent.Transport, TraceLevel.Info)) + { + TryCatch monadicPartitionKeyRangeId = this.MonadicGetPartitionKeyRangeIdFromFeedRange(feedRangeState.FeedRange); + if (monadicPartitionKeyRangeId.Failed) + { + return Task.FromResult(TryCatch.FromException(monadicPartitionKeyRangeId.Exception)); + } + + int partitionKeyRangeId = monadicPartitionKeyRangeId.Result; + + if (!this.partitionKeyRangeIdToHashRange.TryGetValue( + partitionKeyRangeId, + out PartitionKeyHashRange range)) + { + return Task.FromResult(TryCatch.FromException( + new CosmosException( + message: $"PartitionKeyRangeId {partitionKeyRangeId} is gone", + statusCode: System.Net.HttpStatusCode.Gone, + subStatusCode: (int)SubStatusCodes.PartitionKeyRangeGone, + activityId: Guid.NewGuid().ToString(), + requestCharge: 42))); + } + + if (!this.partitionedChanges.TryGetValue(range, out List changes)) + { + throw new InvalidOperationException("failed to find the range."); + } + + List filteredChanges = changes + .Where(change => IsRecordWithinFeedRange(change.Record, feedRangeState.FeedRange, this.partitionKeyDefinition)) + .Where(change => feedRangeState.State.Accept(ChangeFeedPredicate.Singleton, change)) + .Take((changeFeedPaginationOptions ?? 
ChangeFeedPaginationOptions.Default).PageSizeLimit.GetValueOrDefault(int.MaxValue)) + .ToList(); + + if (filteredChanges.Count == 0) + { + ChangeFeedState notModifiedResponseState = new ChangeFeedStateTime(DateTime.UtcNow); + return Task.FromResult( + TryCatch.FromResult( + new ChangeFeedNotModifiedPage( + requestCharge: 42, + activityId: Guid.NewGuid().ToString(), + additionalHeaders: default, + notModifiedResponseState))); + } + + Change lastChange = filteredChanges.Last(); + CosmosObject continuationToken = CosmosObject.Create( + new Dictionary() + { + { "PkRangeId", CosmosNumber64.Create(lastChange.PartitionKeyRangeId) }, + { "LSN", CosmosNumber64.Create(lastChange.LogicalSequenceNumber) } + }); + + ChangeFeedState responseState = ChangeFeedState.Continuation(continuationToken); + + List documents = new List(); + foreach (Change change in filteredChanges) + { + CosmosObject document = ConvertRecordToCosmosElement(change.Record); + documents.Add(CosmosObject.Create(document)); + } + + CosmosArray cosmosDocuments = CosmosArray.Create(documents); + CosmosNumber cosmosCount = CosmosNumber64.Create(cosmosDocuments.Count); + CosmosString cosmosRid = CosmosString.Create("AYIMAMmFOw8YAAAAAAAAAA=="); + + Dictionary responseDictionary = new Dictionary() + { + { "Documents", cosmosDocuments }, + { "_count", cosmosCount }, + { "_rid", cosmosRid }, + }; + CosmosObject cosmosResponse = CosmosObject.Create(responseDictionary); + IJsonWriter jsonWriter = Cosmos.Json.JsonWriter.Create(JsonSerializationFormat.Text); + cosmosResponse.WriteTo(jsonWriter); + byte[] result = jsonWriter.GetResult().ToArray(); + MemoryStream responseStream = new MemoryStream(result); + + return Task.FromResult( + TryCatch.FromResult( + new ChangeFeedSuccessPage( + responseStream, + requestCharge: 42, + itemCount: cosmosDocuments.Count, + activityId: Guid.NewGuid().ToString(), + additionalHeaders: default, + responseState))); + } + } + + public Task MonadicSplitAsync( + FeedRangeInternal feedRange, + CancellationToken cancellationToken) + { + cancellationToken.ThrowIfCancellationRequested(); + + if (feedRange == null) + { + throw new ArgumentNullException(nameof(feedRange)); + } + + TryCatch monadicPartitionKeyRangeId = this.MonadicGetPartitionKeyRangeIdFromFeedRange(feedRange); + if (monadicPartitionKeyRangeId.Failed) + { + return Task.FromResult(TryCatch.FromException(monadicPartitionKeyRangeId.Exception)); + } + + int partitionKeyRangeId = monadicPartitionKeyRangeId.Result; + + // Get the current range and records + if (!this.partitionKeyRangeIdToHashRange.TryGetValue( + partitionKeyRangeId, + out PartitionKeyHashRange parentRange)) + { + return Task.FromResult( + TryCatch.FromException( + new CosmosException( + message: $"PartitionKeyRangeId {partitionKeyRangeId} is gone", + statusCode: System.Net.HttpStatusCode.Gone, + subStatusCode: (int)SubStatusCodes.PartitionKeyRangeGone, + activityId: Guid.NewGuid().ToString(), + requestCharge: 42))); + } + + if (!this.partitionedRecords.TryGetValue(parentRange, out Records parentRecords)) + { + throw new InvalidOperationException("failed to find the range."); + } + + if (!this.partitionedChanges.TryGetValue(parentRange, out List parentChanges)) + { + throw new InvalidOperationException("failed to find the range."); + } + + // Split the range space + PartitionKeyHashRanges partitionKeyHashRanges; + if (this.partitionKeyDefinition.Kind == PartitionKind.MultiHash && + this.partitionKeyDefinition.Paths.Count > 1) + { + //For MultiHash, to help with testing we will split using the median 
partition key among documents. + PartitionKeyHash midPoint = this.ComputeMedianSplitPointAmongDocumentsInPKRange(parentRange); + partitionKeyHashRanges = PartitionKeyHashRangeSplitterAndMerger.SplitRange(parentRange, midPoint); + } + else + { + partitionKeyHashRanges = PartitionKeyHashRangeSplitterAndMerger.SplitRange( + parentRange, + rangeCount: 2); + } + + // Update the partition routing map + int maxPartitionKeyRangeId = this.partitionKeyRangeIdToHashRange.Keys.Max(); + this.parentToChildMapping[partitionKeyRangeId] = (maxPartitionKeyRangeId + 1, maxPartitionKeyRangeId + 2); + Dictionary newPartitionKeyRangeIdToHashRange = new Dictionary() + { + { maxPartitionKeyRangeId + 1, partitionKeyHashRanges.First() }, + { maxPartitionKeyRangeId + 2, partitionKeyHashRanges.Last() }, + }; + + foreach (KeyValuePair kvp in this.partitionKeyRangeIdToHashRange) + { + int oldRangeId = kvp.Key; + PartitionKeyHashRange oldRange = kvp.Value; + if (!oldRange.Equals(parentRange)) + { + newPartitionKeyRangeIdToHashRange[oldRangeId] = oldRange; + } + } + + // Copy over the partitioned records (minus the parent range) + PartitionKeyHashRangeDictionary newPartitionedRecords = new PartitionKeyHashRangeDictionary( + PartitionKeyHashRanges.Create(newPartitionKeyRangeIdToHashRange.Values)); + + newPartitionedRecords[partitionKeyHashRanges.First()] = new Records(); + newPartitionedRecords[partitionKeyHashRanges.Last()] = new Records(); + + foreach (PartitionKeyHashRange range in this.partitionKeyRangeIdToHashRange.Values) + { + if (!range.Equals(parentRange)) + { + newPartitionedRecords[range] = this.partitionedRecords[range]; + } + } + + PartitionKeyHashRangeDictionary> newPartitionedChanges = new PartitionKeyHashRangeDictionary>( + PartitionKeyHashRanges.Create(newPartitionKeyRangeIdToHashRange.Values)); + + newPartitionedChanges[partitionKeyHashRanges.First()] = new List(); + newPartitionedChanges[partitionKeyHashRanges.Last()] = new List(); + + foreach (PartitionKeyHashRange range in this.partitionKeyRangeIdToHashRange.Values) + { + if (!range.Equals(parentRange)) + { + newPartitionedChanges[range] = this.partitionedChanges[range]; + } + } + + this.partitionedRecords = newPartitionedRecords; + this.partitionedChanges = newPartitionedChanges; + this.partitionKeyRangeIdToHashRange = newPartitionKeyRangeIdToHashRange; + + // Rehash the records in the parent range + foreach (Record record in parentRecords) + { + PartitionKeyHash partitionKeyHash = GetHashFromPayload(record.Payload, this.partitionKeyDefinition); + if (!this.partitionedRecords.TryGetValue(partitionKeyHash, out Records records)) + { + records = new Records(); + this.partitionedRecords[partitionKeyHash] = records; + } + + records.Add(record); + } + + // Rehash the changes in the parent range + foreach (Change change in parentChanges) + { + PartitionKeyHash partitionKeyHash = GetHashFromPayload(change.Record.Payload, this.partitionKeyDefinition); + if (!this.partitionedChanges.TryGetValue(partitionKeyHash, out List changes)) + { + changes = new List(); + this.partitionedChanges[partitionKeyHash] = changes; + } + + changes.Add(change); + } + + return Task.FromResult(TryCatch.FromResult()); + } + + public Task MonadicMergeAsync( + FeedRangeInternal feedRange1, + FeedRangeInternal feedRange2, + CancellationToken cancellationToken) + { + cancellationToken.ThrowIfCancellationRequested(); + + if (feedRange1 == null) + { + throw new ArgumentNullException(nameof(feedRange1)); + } + + if (feedRange2 == null) + { + throw new 
ArgumentNullException(nameof(feedRange2)); + } + + TryCatch monadicPartitionKeyRangeId1 = this.MonadicGetPartitionKeyRangeIdFromFeedRange(feedRange1); + if (monadicPartitionKeyRangeId1.Failed) + { + return Task.FromResult(TryCatch.FromException(monadicPartitionKeyRangeId1.Exception)); + } + + int sourceRangeId1 = monadicPartitionKeyRangeId1.Result; + + TryCatch monadicPartitionKeyRangeId2 = this.MonadicGetPartitionKeyRangeIdFromFeedRange(feedRange2); + if (monadicPartitionKeyRangeId2.Failed) + { + return Task.FromResult(TryCatch.FromException(monadicPartitionKeyRangeId2.Exception)); + } + + int sourceRangeId2 = monadicPartitionKeyRangeId2.Result; + + // Get the range and records + if (!this.partitionKeyRangeIdToHashRange.TryGetValue( + sourceRangeId1, + out PartitionKeyHashRange sourceHashRange1)) + { + return Task.FromResult( + TryCatch.FromException( + new CosmosException( + message: $"PartitionKeyRangeId {sourceRangeId1} is gone", + statusCode: System.Net.HttpStatusCode.Gone, + subStatusCode: (int)SubStatusCodes.PartitionKeyRangeGone, + activityId: Guid.NewGuid().ToString(), + requestCharge: 42))); + } + + if (!this.partitionedRecords.TryGetValue(sourceHashRange1, out Records sourceRecords1)) + { + throw new InvalidOperationException("failed to find the range."); + } + + if (!this.partitionedChanges.TryGetValue(sourceHashRange1, out List sourceChanges1)) + { + throw new InvalidOperationException("failed to find the range."); + } + + if (!this.partitionKeyRangeIdToHashRange.TryGetValue( + sourceRangeId2, + out PartitionKeyHashRange sourceHashRange2)) + { + return Task.FromResult( + TryCatch.FromException( + new CosmosException( + message: $"PartitionKeyRangeId {sourceRangeId2} is gone", + statusCode: System.Net.HttpStatusCode.Gone, + subStatusCode: (int)SubStatusCodes.PartitionKeyRangeGone, + activityId: Guid.NewGuid().ToString(), + requestCharge: 42))); + } + + if (!this.partitionedRecords.TryGetValue(sourceHashRange2, out Records sourceRecords2)) + { + throw new InvalidOperationException("failed to find the range."); + } + + if (!this.partitionedChanges.TryGetValue(sourceHashRange2, out List sourceChanges2)) + { + throw new InvalidOperationException("failed to find the range."); + } + + // Merge the range space + TryCatch monadicRanges = PartitionKeyHashRanges.Monadic.Create(new List() + { + sourceHashRange1, + sourceHashRange2 + }); + + if (monadicRanges.Failed) + { + return Task.FromResult(TryCatch.FromException(monadicRanges.Exception)); + } + + PartitionKeyHashRange mergedHashRange = PartitionKeyHashRangeSplitterAndMerger.MergeRanges( + monadicRanges.Result); + + // Update the partition routing map + int maxPartitionKeyRangeId = this.partitionKeyRangeIdToHashRange.Keys.Max(); + Dictionary newPartitionKeyRangeIdToHashRange = new Dictionary() + { + { maxPartitionKeyRangeId + 1, mergedHashRange }, + }; + + foreach (KeyValuePair kvp in this.partitionKeyRangeIdToHashRange) + { + int oldRangeId = kvp.Key; + PartitionKeyHashRange oldRange = kvp.Value; + if (!(oldRange.Equals(sourceHashRange1) || oldRange.Equals(sourceHashRange2))) + { + newPartitionKeyRangeIdToHashRange[oldRangeId] = oldRange; + } + } + + // Copy over the partitioned records (minus the source ranges) + PartitionKeyHashRangeDictionary newPartitionedRecords = new PartitionKeyHashRangeDictionary( + PartitionKeyHashRanges.Create(newPartitionKeyRangeIdToHashRange.Values)); + + newPartitionedRecords[mergedHashRange] = new Records(); + + foreach (PartitionKeyHashRange range in this.partitionKeyRangeIdToHashRange.Values) + { + if 
(!(range.Equals(sourceHashRange1) || range.Equals(sourceHashRange2))) + { + newPartitionedRecords[range] = this.partitionedRecords[range]; + } + } + + PartitionKeyHashRangeDictionary> newPartitionedChanges = new PartitionKeyHashRangeDictionary>( + PartitionKeyHashRanges.Create(newPartitionKeyRangeIdToHashRange.Values)); + + newPartitionedChanges[mergedHashRange] = new List(); + + foreach (PartitionKeyHashRange range in this.partitionKeyRangeIdToHashRange.Values) + { + if (!(range.Equals(sourceHashRange1) || range.Equals(sourceHashRange2))) + { + newPartitionedChanges[range] = this.partitionedChanges[range]; + } + } + + this.partitionedRecords = newPartitionedRecords; + this.partitionedChanges = newPartitionedChanges; + this.partitionKeyRangeIdToHashRange = newPartitionKeyRangeIdToHashRange; + + // Rehash the records in the source ranges + List combinedOrderedRecords = new List(); + foreach (Records sourceRecords in new Records[] { sourceRecords1, sourceRecords2 }) + { + combinedOrderedRecords.AddRange(sourceRecords); + } + + combinedOrderedRecords = combinedOrderedRecords + .OrderBy(record => record.ResourceIdentifier.Database) + .ThenBy(record => record.ResourceIdentifier.Document) + .ToList(); + + foreach (Record record in combinedOrderedRecords) + { + PartitionKeyHash partitionKeyHash = GetHashFromPayload(record.Payload, this.partitionKeyDefinition); + if (!this.partitionedRecords.TryGetValue(partitionKeyHash, out Records records)) + { + records = new Records(); + this.partitionedRecords[partitionKeyHash] = records; + } + + records.Add(record); + } + + // Rehash the changes in the parent range + List combinedOrderedChanges = new List(); + foreach (List sourceChanges in new List[] { sourceChanges1, sourceChanges2 }) + { + combinedOrderedChanges.AddRange(sourceChanges); + } + + combinedOrderedChanges = combinedOrderedChanges + .OrderBy(change => change.PartitionKeyRangeId) + .ThenBy(change => change.LogicalSequenceNumber) + .ToList(); + + foreach (Change change in combinedOrderedChanges) + { + PartitionKeyHash partitionKeyHash = GetHashFromPayload(change.Record.Payload, this.partitionKeyDefinition); + if (!this.partitionedChanges.TryGetValue(partitionKeyHash, out List changes)) + { + changes = new List(); + this.partitionedChanges[partitionKeyHash] = changes; + } + + changes.Add(change); + } + + return Task.FromResult(TryCatch.FromResult()); + } + + public IEnumerable PartitionKeyRangeIds => this.partitionKeyRangeIdToHashRange.Keys; + + private static PartitionKeyHash GetHashFromPayload( + CosmosObject payload, + PartitionKeyDefinition partitionKeyDefinition) + { + IList partitionKey = GetPartitionKeysFromPayload(payload, partitionKeyDefinition); + return GetHashFromPartitionKeys(partitionKey, partitionKeyDefinition); + } + + private static PartitionKeyHash GetHashFromObjectModel( + Cosmos.PartitionKey payload, + PartitionKeyDefinition partitionKeyDefinition) + { + IList partitionKeys = GetPartitionKeysFromObjectModel(payload); + return GetHashFromPartitionKeys(partitionKeys, partitionKeyDefinition); + } + + private static CosmosElement GetPartitionKeyFromPayload(CosmosObject payload, PartitionKeyDefinition partitionKeyDefinition) + { + // Restrict the partition key definition for now to keep things simple + if (partitionKeyDefinition.Kind != PartitionKind.Hash) + { + throw new ArgumentOutOfRangeException("Can only support hash partitioning"); + } + + if (partitionKeyDefinition.Version != Documents.PartitionKeyDefinitionVersion.V2) + { + throw new ArgumentOutOfRangeException("Can only 
support hash v2"); + } + + if (partitionKeyDefinition.Paths.Count != 1) + { + throw new ArgumentOutOfRangeException("Can only support a single partition key path."); + } + + IEnumerable tokens = partitionKeyDefinition.Paths[0].Split("/").Skip(1); + + CosmosElement partitionKey = payload; + foreach (string token in tokens) + { + if (partitionKey != default) + { + if (!payload.TryGetValue(token, out partitionKey)) + { + partitionKey = default; + } + } + } + + return partitionKey; + } + + private static IList GetPartitionKeysFromPayload(CosmosObject payload, PartitionKeyDefinition partitionKeyDefinition) + { + // Restrict the partition key definition for now to keep things simple + if (partitionKeyDefinition.Kind != PartitionKind.MultiHash && partitionKeyDefinition.Kind != PartitionKind.Hash) + { + throw new ArgumentOutOfRangeException("Can only support Hash/MultiHash partitioning"); + } + + if (partitionKeyDefinition.Version != Documents.PartitionKeyDefinitionVersion.V2) + { + throw new ArgumentOutOfRangeException("Can only support hash v2"); + } + + IList cosmosElements = new List(); + foreach (string partitionKeyPath in partitionKeyDefinition.Paths) + { + IEnumerable tokens = partitionKeyPath.Split("/").Skip(1); + CosmosElement partitionKey = payload; + foreach (string token in tokens) + { + if (partitionKey != default) + { + if (!payload.TryGetValue(token, out partitionKey)) + { + partitionKey = default; + } + } + } + cosmosElements.Add(partitionKey); + } + return cosmosElements; + } + + private static IList GetPartitionKeysFromObjectModel(Cosmos.PartitionKey payload) + { + CosmosArray partitionKeyPayload = CosmosArray.Parse(payload.ToJsonString()); + List cosmosElemementPayload = new List(); + foreach (CosmosElement element in partitionKeyPayload) + { + cosmosElemementPayload.Add(element); + } + return cosmosElemementPayload; + } + + private static PartitionKeyHash GetHashFromPartitionKeys(IList partitionKeys, PartitionKeyDefinition partitionKeyDefinition) + { + // Restrict the partition key definition for now to keep things simple + if (partitionKeyDefinition.Kind != PartitionKind.MultiHash && partitionKeyDefinition.Kind != PartitionKind.Hash) + { + throw new ArgumentOutOfRangeException("Can only support Hash/MultiHash partitioning"); + } + + if (partitionKeyDefinition.Version != Documents.PartitionKeyDefinitionVersion.V2) + { + throw new ArgumentOutOfRangeException("Can only support hash v2"); + } + + IList partitionKeyHashValues = new List(); + + foreach (CosmosElement partitionKey in partitionKeys) + { + if (partitionKey is CosmosArray cosmosArray) + { + foreach (CosmosElement element in cosmosArray) + { + PartitionKeyHash elementHash = element switch + { + null => PartitionKeyHash.V2.HashUndefined(), + CosmosString stringPartitionKey => PartitionKeyHash.V2.Hash(stringPartitionKey.Value), + CosmosNumber numberPartitionKey => PartitionKeyHash.V2.Hash(Number64.ToDouble(numberPartitionKey.Value)), + CosmosBoolean cosmosBoolean => PartitionKeyHash.V2.Hash(cosmosBoolean.Value), + CosmosNull _ => PartitionKeyHash.V2.HashNull(), + _ => throw new ArgumentOutOfRangeException(), + }; + partitionKeyHashValues.Add(elementHash.HashValues[0]); + } + continue; + } + + PartitionKeyHash partitionKeyHash = partitionKey switch + { + null => PartitionKeyHash.V2.HashUndefined(), + CosmosString stringPartitionKey => PartitionKeyHash.V2.Hash(stringPartitionKey.Value), + CosmosNumber numberPartitionKey => PartitionKeyHash.V2.Hash(Number64.ToDouble(numberPartitionKey.Value)), + CosmosBoolean cosmosBoolean => 
PartitionKeyHash.V2.Hash(cosmosBoolean.Value), + CosmosNull _ => PartitionKeyHash.V2.HashNull(), + _ => throw new ArgumentOutOfRangeException(), + }; + partitionKeyHashValues.Add(partitionKeyHash.HashValues[0]); + } + + return new PartitionKeyHash(partitionKeyHashValues.ToArray()); + } + + private static CosmosObject ConvertRecordToCosmosElement(Record record) + { + Dictionary keyValuePairs = new Dictionary + { + ["_rid"] = CosmosString.Create(record.ResourceIdentifier.ToString()), + ["_ts"] = CosmosNumber64.Create(record.Timestamp.Ticks), + ["id"] = CosmosString.Create(record.Identifier) + }; + + foreach (KeyValuePair property in record.Payload) + { + keyValuePairs[property.Key] = property.Value; + } + + return CosmosObject.Create(keyValuePairs); + } + + private static bool IsRecordWithinFeedRange( + Record record, + FeedRange feedRange, + PartitionKeyDefinition partitionKeyDefinition) + { + if (feedRange is FeedRangePartitionKey feedRangePartitionKey) + { + IList partitionKey = GetPartitionKeysFromObjectModel(feedRangePartitionKey.PartitionKey); + IList partitionKeyFromRecord = GetPartitionKeysFromPayload(record.Payload, partitionKeyDefinition); + if (partitionKeyDefinition.Kind == PartitionKind.MultiHash) + { + PartitionKeyHash partitionKeyHash = GetHashFromPartitionKeys(partitionKey, partitionKeyDefinition); + PartitionKeyHash partitionKeyFromRecordHash = GetHashFromPartitionKeys(partitionKeyFromRecord, partitionKeyDefinition); + + return partitionKeyHash.Equals(partitionKeyFromRecordHash) || partitionKeyFromRecordHash.Value.StartsWith(partitionKeyHash.Value); + } + return partitionKey.SequenceEqual(partitionKeyFromRecord); + } + else if (feedRange is FeedRangeEpk feedRangeEpk) + { + PartitionKeyHashRange hashRange = FeedRangeEpkToHashRange(feedRangeEpk); + PartitionKeyHash hash = GetHashFromPayload(record.Payload, partitionKeyDefinition); + return hashRange.Contains(hash); + } + else if (feedRange is FeedRangePartitionKeyRange) + { + return true; + } + else + { + throw new NotImplementedException(); + } + } + + private TryCatch MonadicGetPartitionKeyRangeIdFromFeedRange(FeedRange feedRange) + { + int partitionKeyRangeId; + if (feedRange is FeedRangeEpk feedRangeEpk) + { + // Check to see if any of the system ranges contain the user range. + List matchIds; + if (feedRangeEpk.Range.Min.Equals(FeedRangeEpk.FullRange.Range.Min) && feedRangeEpk.Range.Max.Equals(FeedRangeEpk.FullRange.Range.Max)) + { + matchIds = this.PartitionKeyRangeIds.ToList(); + } + else + { + PartitionKeyHashRange hashRange = FeedRangeEpkToHashRange(feedRangeEpk); + matchIds = this.partitionKeyRangeIdToHashRange + .Where(kvp => kvp.Value.Contains(hashRange)) + .Select(kvp => kvp.Key) + .ToList(); + } + + if (matchIds.Count != 1) + { + // Simulate a split exception, since we don't have a partition key range id to route to. 
+ CosmosException goneException = new CosmosException( + message: $"Epk Range: {feedRangeEpk.Range} is gone.", + statusCode: System.Net.HttpStatusCode.Gone, + subStatusCode: (int)SubStatusCodes.PartitionKeyRangeGone, + activityId: Guid.NewGuid().ToString(), + requestCharge: default); + + return TryCatch.FromException(goneException); + } + + partitionKeyRangeId = matchIds[0]; + } + else if (feedRange is FeedRangePartitionKeyRange feedRangePartitionKeyRange) + { + partitionKeyRangeId = int.Parse(feedRangePartitionKeyRange.PartitionKeyRangeId); + } + else if (feedRange is FeedRangePartitionKey feedRangePartitionKey) + { + PartitionKeyHash partitionKeyHash = GetHashFromObjectModel(feedRangePartitionKey.PartitionKey, this.partitionKeyDefinition); + + int? foundValue = null; + foreach (KeyValuePair kvp in this.partitionKeyRangeIdToHashRange) + { + if (kvp.Value.Contains(partitionKeyHash)) + { + foundValue = kvp.Key; + } + } + + if (!foundValue.HasValue) + { + throw new InvalidOperationException("Failed to find value"); + } + + partitionKeyRangeId = foundValue.Value; + } + else + { + throw new NotImplementedException("Unknown feed range type"); + } + + return TryCatch.FromResult(partitionKeyRangeId); + } + + private static PartitionKeyHashRange FeedRangeEpkToHashRange(FeedRangeEpk feedRangeEpk) + { + PartitionKeyHash? start = feedRangeEpk.Range.Min == string.Empty ? (PartitionKeyHash?)null : PartitionKeyHash.Parse(feedRangeEpk.Range.Min); + PartitionKeyHash? end = feedRangeEpk.Range.Max == string.Empty || feedRangeEpk.Range.Max == "FF" ? (PartitionKeyHash?)null : PartitionKeyHash.Parse(feedRangeEpk.Range.Max); + PartitionKeyHashRange hashRange = new PartitionKeyHashRange(start, end); + return hashRange; + } + + private static FeedRangeEpk HashRangeToFeedRangeEpk(PartitionKeyHashRange hashRange) + { + return new FeedRangeEpk( + new Documents.Routing.Range( + min: hashRange.StartInclusive.HasValue ? hashRange.StartInclusive.ToString() : string.Empty, + max: hashRange.EndExclusive.HasValue ? 
hashRange.EndExclusive.ToString() : string.Empty, + isMinInclusive: true, + isMaxInclusive: false)); + } + + private PartitionKeyHash ComputeMedianSplitPointAmongDocumentsInPKRange(PartitionKeyHashRange hashRange) + { + if (!this.partitionedRecords.TryGetValue(hashRange, out Records parentRecords)) + { + throw new InvalidOperationException("failed to find the range."); + } + + List partitionKeyHashes = new List(); + foreach (Record record in parentRecords) + { + PartitionKeyHash partitionKeyHash = GetHashFromPayload(record.Payload, this.partitionKeyDefinition); + partitionKeyHashes.Add(partitionKeyHash); + } + + partitionKeyHashes.Sort(); + PartitionKeyHash medianPkHash = partitionKeyHashes[partitionKeyHashes.Count / 2]; + + // For MultiHash Collection, split at top level to ensure documents for top level key exist across partitions + // after split + if (medianPkHash.HashValues.Count > 1) + { + return new PartitionKeyHash(medianPkHash.HashValues[0]); + } + + return medianPkHash; + } + public Task> MonadicGetResourceIdentifierAsync(ITrace trace, CancellationToken cancellationToken) + { + return Task.FromResult(TryCatch.FromResult("AYIMAMmFOw8YAAAAAAAAAA==")); + } + + private sealed class Records : IReadOnlyList + { + private readonly List storage; + + public Records() + { + this.storage = new List(); + } + + public Record this[int index] => this.storage[index]; + + public int Count => this.storage.Count; + + public IEnumerator GetEnumerator() => this.storage.GetEnumerator(); + + IEnumerator IEnumerable.GetEnumerator() => this.storage.GetEnumerator(); + + public Record Add(int pkrangeid, CosmosObject payload) + { + // using pkrangeid for database since resource id doesnt serialize both document and pkrangeid. + ResourceId currentResourceId; + if (this.Count == 0) + { + currentResourceId = ResourceId.Parse("AYIMAMmFOw8YAAAAAAAAAA=="); + + PropertyInfo documentProp = currentResourceId + .GetType() + .GetProperty("Document", BindingFlags.NonPublic | BindingFlags.Public | BindingFlags.Instance); + documentProp.SetValue(currentResourceId, (ulong)1); + + PropertyInfo databaseProp = currentResourceId + .GetType() + .GetProperty("Database", BindingFlags.NonPublic | BindingFlags.Public | BindingFlags.Instance); + databaseProp.SetValue(currentResourceId, (uint)pkrangeid + 1); + } + else + { + currentResourceId = this.storage[this.storage.Count - 1].ResourceIdentifier; + } + + ResourceId nextResourceId = ResourceId.Parse("AYIMAMmFOw8YAAAAAAAAAA=="); + { + PropertyInfo documentProp = nextResourceId + .GetType() + .GetProperty("Document", BindingFlags.NonPublic | BindingFlags.Public | BindingFlags.Instance); + documentProp.SetValue(nextResourceId, (ulong)(currentResourceId.Document + 1)); + + PropertyInfo databaseProp = nextResourceId + .GetType() + .GetProperty("Database", BindingFlags.NonPublic | BindingFlags.Public | BindingFlags.Instance); + databaseProp.SetValue(nextResourceId, (uint)pkrangeid + 1); + } + + Record record = new Record(nextResourceId, DateTime.UtcNow, Guid.NewGuid().ToString(), payload); + this.storage.Add(record); + return record; + } + + public Record Add(Record record) + { + this.storage.Add(record); + return record; + } + } + + private readonly struct Change + { + public Change(Record record, ulong partitionKeyRangeId, ulong logicalSequenceNumber) + { + this.Record = record ?? 
throw new ArgumentNullException(nameof(record)); + this.PartitionKeyRangeId = partitionKeyRangeId; + this.LogicalSequenceNumber = logicalSequenceNumber; + } + + public Record Record { get; } + public ulong PartitionKeyRangeId { get; } + public ulong LogicalSequenceNumber { get; } + } + + private sealed class ChangeFeedPredicate : IChangeFeedStateVisitor + { + public static readonly ChangeFeedPredicate Singleton = new ChangeFeedPredicate(); + + private ChangeFeedPredicate() + { + } + + public bool Visit(ChangeFeedStateBeginning changeFeedStateBeginning, Change input) => true; + + public bool Visit(ChangeFeedStateTime changeFeedStateTime, Change input) => input.Record.Timestamp >= changeFeedStateTime.StartTime; + + public bool Visit(ChangeFeedStateContinuation changeFeedStateContinuation, Change input) + { + CosmosObject continuation = (CosmosObject)changeFeedStateContinuation.ContinuationToken; + + if (!continuation.TryGetValue("PkRangeId", out CosmosNumber pkRangeIdCosmosElement)) + { + throw new InvalidOperationException("failed to get pkrange id"); + } + + ulong pkRangeId = (ulong)Number64.ToLong(pkRangeIdCosmosElement.Value); + + if (!continuation.TryGetValue("LSN", out CosmosNumber lsnCosmosElement)) + { + throw new InvalidOperationException("failed to get lsn"); + } + + ulong lsn = (ulong)Number64.ToLong(lsnCosmosElement.Value); + + int pkRangeIdCompare = input.PartitionKeyRangeId.CompareTo(pkRangeId); + if (pkRangeIdCompare < 0) + { + return false; + } + else if (pkRangeIdCompare > 0) + { + return true; + } + else + { + return input.LogicalSequenceNumber > lsn; + } + } + + public bool Visit(ChangeFeedStateNow changeFeedStateNow, Change input) + { + DateTime now = DateTime.UtcNow; + ChangeFeedStateTime startTime = new ChangeFeedStateTime(now); + return this.Visit(startTime, input); + } + } + + private sealed class CosmosElementToSqlScalarExpressionVisitor : ICosmosElementVisitor + { + public static readonly CosmosElementToSqlScalarExpressionVisitor Singleton = new CosmosElementToSqlScalarExpressionVisitor(); + + private CosmosElementToSqlScalarExpressionVisitor() + { + // Private constructor, since this class is a singleton. + } + + public SqlScalarExpression Visit(CosmosArray cosmosArray) + { + List items = new List(); + foreach (CosmosElement item in cosmosArray) + { + items.Add(item.Accept(this)); + } + + return SqlArrayCreateScalarExpression.Create(items.ToImmutableArray()); + } + + public SqlScalarExpression Visit(CosmosBinary cosmosBinary) + { + // Can not convert binary to scalar expression without knowing the API type. + throw new NotImplementedException(); + } + + public SqlScalarExpression Visit(CosmosBoolean cosmosBoolean) + { + return SqlLiteralScalarExpression.Create(SqlBooleanLiteral.Create(cosmosBoolean.Value)); + } + + public SqlScalarExpression Visit(CosmosGuid cosmosGuid) + { + // Can not convert guid to scalar expression without knowing the API type. 
+ throw new NotImplementedException(); + } + + public SqlScalarExpression Visit(CosmosNull cosmosNull) + { + return SqlLiteralScalarExpression.Create(SqlNullLiteral.Create()); + } + + public SqlScalarExpression Visit(CosmosNumber cosmosNumber) + { + if (!(cosmosNumber is CosmosNumber64 cosmosNumber64)) + { + throw new ArgumentException($"Unknown {nameof(CosmosNumber)} type: {cosmosNumber.GetType()}."); + } + + return SqlLiteralScalarExpression.Create(SqlNumberLiteral.Create(cosmosNumber64.GetValue())); + } + + public SqlScalarExpression Visit(CosmosObject cosmosObject) + { + List properties = new List(); + foreach (KeyValuePair prop in cosmosObject) + { + SqlPropertyName name = SqlPropertyName.Create(prop.Key); + CosmosElement value = prop.Value; + SqlScalarExpression expression = value.Accept(this); + SqlObjectProperty property = SqlObjectProperty.Create(name, expression); + properties.Add(property); + } + + return SqlObjectCreateScalarExpression.Create(properties.ToImmutableArray()); + } + + public SqlScalarExpression Visit(CosmosString cosmosString) + { + return SqlLiteralScalarExpression.Create(SqlStringLiteral.Create(cosmosString.Value)); + } + + public SqlScalarExpression Visit(CosmosUndefined cosmosUndefined) + { + return SqlLiteralScalarExpression.Create(SqlUndefinedLiteral.Create()); + } + } + } } \ No newline at end of file diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Pagination/SinglePartitionPartitionRangeEnumeratorTests.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Pagination/SinglePartitionPartitionRangeEnumeratorTests.cs index cf63ef8f10..8288148aab 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Pagination/SinglePartitionPartitionRangeEnumeratorTests.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Pagination/SinglePartitionPartitionRangeEnumeratorTests.cs @@ -80,9 +80,9 @@ public async Task TestSplitAsync() new ReadFeedPartitionRangeEnumerator( inMemoryCollection, feedRangeState: new FeedRangeState(ranges[0], ReadFeedState.Beginning()), - readFeedPaginationOptions: new ReadFeedPaginationOptions(pageSizeHint: 10), - cancellationToken: default), - NoOpTrace.Singleton); + readFeedPaginationOptions: new ReadFeedPaginationOptions(pageSizeHint: 10)), + NoOpTrace.Singleton, + cancellationToken: default); (HashSet parentIdentifiers, ReadFeedState state) = await this.PartialDrainAsync(enumerator, numIterations: 3); @@ -106,8 +106,7 @@ public async Task TestSplitAsync() (feedRangeState) => new ReadFeedPartitionRangeEnumerator( inMemoryCollection, feedRangeState: feedRangeState, - readFeedPaginationOptions: new ReadFeedPaginationOptions(pageSizeHint: 10), - cancellationToken: default), + readFeedPaginationOptions: new ReadFeedPaginationOptions(pageSizeHint: 10)), trace: NoOpTrace.Singleton); HashSet resourceIdentifiers = await this.DrainFullyAsync(enumerable); @@ -134,8 +133,7 @@ protected override IAsyncEnumerable> CreateEnumerable( (feedRangeState) => new ReadFeedPartitionRangeEnumerator( documentContainer, feedRangeState: feedRangeState, - readFeedPaginationOptions: new ReadFeedPaginationOptions(pageSizeHint: 10), - cancellationToken: default), + readFeedPaginationOptions: new ReadFeedPaginationOptions(pageSizeHint: 10)), trace: NoOpTrace.Singleton); } @@ -152,9 +150,9 @@ protected override Task>> CreateEnumerat feedRangeState: new FeedRangeState( new FeedRangePartitionKeyRange(partitionKeyRangeId: "0"), state ?? 
ReadFeedState.Beginning()), - readFeedPaginationOptions: new ReadFeedPaginationOptions(pageSizeHint: 10), - cancellationToken: cancellationToken), - trace: NoOpTrace.Singleton); + readFeedPaginationOptions: new ReadFeedPaginationOptions(pageSizeHint: 10)), + trace: NoOpTrace.Singleton, + cancellationToken: default); return Task.FromResult(enumerator); } diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/ContinuationTokens/OrderByQueryResultTests.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/ContinuationTokens/OrderByQueryResultTests.cs index 3ca62e4057..4208dcee34 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/ContinuationTokens/OrderByQueryResultTests.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/ContinuationTokens/OrderByQueryResultTests.cs @@ -33,7 +33,8 @@ public void TestOrderByUndefined() memoryStream, Documents.ResourceType.Document, out CosmosArray documents, - out CosmosObject distributionPlan); + out CosmosObject distributionPlan, + out bool? streaming); List orderByQueryResults = documents.Select(x => new OrderByQueryResult(x)).ToList(); Assert.AreEqual(14, orderByQueryResults.Count); diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/OptimisticDirectExecutionQueryBaselineTests.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/OptimisticDirectExecutionQueryBaselineTests.cs index bfbdda891c..7e697260f1 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/OptimisticDirectExecutionQueryBaselineTests.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/OptimisticDirectExecutionQueryBaselineTests.cs @@ -169,9 +169,9 @@ public async Task TestPipelineForBackendDocumentsOnSinglePartitionAsync() QueryRequestOptions queryRequestOptions = GetQueryRequestOptions(enableOptimisticDirectExecution: true); DocumentContainer inMemoryCollection = await CreateDocumentContainerAsync(numItems, multiPartition: false); - IQueryPipelineStage queryPipelineStage = await GetOdePipelineAsync(input, inMemoryCollection, queryRequestOptions); + IQueryPipelineStage queryPipelineStage = CreateOdePipeline(input, inMemoryCollection, queryRequestOptions); - while (await queryPipelineStage.MoveNextAsync(NoOpTrace.Singleton)) + while (await queryPipelineStage.MoveNextAsync(NoOpTrace.Singleton, cancellationToken: default)) { Assert.AreEqual(TestInjections.PipelineType.OptimisticDirectExecution, queryRequestOptions.TestSettings.Stats.PipelineType.Value); @@ -221,7 +221,7 @@ public async Task TestOdeTokenWithSpecializedPipeline() string expectedErrorMessage = "Execution of this query using the supplied continuation token requires EnableOptimisticDirectExecution to be set in QueryRequestOptions. 
" + "If the error persists after that, contact system administrator."; - while (await queryPipelineStage.MoveNextAsync(NoOpTrace.Singleton)) + while (await queryPipelineStage.MoveNextAsync(NoOpTrace.Singleton, cancellationToken: default)) { if (queryPipelineStage.Current.Failed) { @@ -576,7 +576,7 @@ private async Task ValidateErrorMessageWithModifiedOdeFlags(OptimisticDirectExec } [TestMethod] - public async Task TestTextDistributionPlanParsingFromStream() + public void TestTextDistributionPlanParsingFromStream() { string textPath = "../../../Query/DistributionPlans/Text"; string[] filePaths = Directory.GetFiles(textPath); @@ -597,7 +597,8 @@ public async Task TestTextDistributionPlanParsingFromStream() memoryStream, Documents.ResourceType.Document, out CosmosArray documents, - out CosmosObject distributionPlan); + out CosmosObject distributionPlan, + out bool? ignored); if (distributionPlan.TryGetValue("backendDistributionPlan", out CosmosElement backendDistributionPlan) && distributionPlan.TryGetValue("clientDistributionPlan", out CosmosElement clientDistributionPlan)) @@ -613,7 +614,7 @@ public async Task TestTextDistributionPlanParsingFromStream() } [TestMethod] - public async Task TestBinaryDistributionPlanParsingFromStream() + public void TestBinaryDistributionPlanParsingFromStream() { string expectedBackendPlan = "{\"query\":\"\\nSELECT Count(r.a) AS count_a\\nFROM r\",\"obfuscatedQuery\":\"{\\\"query\\\":\\\"SELECT Count(r.a) AS p1\\\\nFROM r\\\",\\\"parameters\\\":[]}\",\"shape\":\"{\\\"Select\\\":{\\\"Type\\\":\\\"List\\\",\\\"AggCount\\\":1},\\\"From\\\":{\\\"Expr\\\":\\\"Aliased\\\"}}\",\"signature\":-4885972563975185329,\"shapeSignature\":-6171928203673877984,\"queryIL\":{\"Expression\":{\"Kind\":\"Aggregate\",\"Type\":{\"Kind\":\"Enum\",\"ItemType\":{\"Kind\":\"Base\",\"BaseTypeKind\":\"Number\",\"ExcludesUndefined\":true}},\"Aggregate\":{\"Kind\":\"Builtin\",\"Signature\":{\"ItemType\":{\"Kind\":\"Base\",\"BaseTypeKind\":\"Variant\",\"ExcludesUndefined\":false},\"ResultType\":{\"Kind\":\"Base\",\"BaseTypeKind\":\"Number\",\"ExcludesUndefined\":true}},\"OperatorKind\":\"Count\"},\"SourceExpression\":{\"Kind\":\"Select\",\"Type\":{\"Kind\":\"Enum\",\"ItemType\":{\"Kind\":\"Base\",\"BaseTypeKind\":\"Variant\",\"ExcludesUndefined\":false}},\"Delegate\":{\"Kind\":\"ScalarExpression\",\"Type\":{\"Kind\":\"Base\",\"BaseTypeKind\":\"Variant\",\"ExcludesUndefined\":false},\"DeclaredVariable\":{\"Name\":\"v0\",\"UniqueId\":0,\"Type\":{\"Kind\":\"Base\",\"BaseTypeKind\":\"Variant\",\"ExcludesUndefined\":true}},\"Expression\":{\"Kind\":\"PropertyRef\",\"Type\":{\"Kind\":\"Base\",\"BaseTypeKind\":\"Variant\",\"ExcludesUndefined\":false},\"Expression\":{\"Kind\":\"VariableRef\",\"Type\":{\"Kind\":\"Base\",\"BaseTypeKind\":\"Variant\",\"ExcludesUndefined\":true},\"Variable\":{\"Name\":\"v0\",\"UniqueId\":0,\"Type\":{\"Kind\":\"Base\",\"BaseTypeKind\":\"Variant\",\"ExcludesUndefined\":true}}},\"PropertyName\":\"a\"}},\"SourceExpression\":{\"Kind\":\"Input\",\"Type\":{\"Kind\":\"Enum\",\"ItemType\":{\"Kind\":\"Base\",\"BaseTypeKind\":\"Variant\",\"ExcludesUndefined\":true}},\"Name\":\"r\"}}}},\"noSpatial\":true,\"language\":\"QueryIL\"}"; string expectedClientPlan = 
"{\"clientQL\":{\"Kind\":\"Select\",\"DeclaredVariable\":{\"Name\":\"v0\",\"UniqueId\":2},\"Expression\":{\"Kind\":\"ObjectCreate\",\"ObjectKind\":\"Object\",\"Properties\":[{\"Name\":\"count_a\",\"Expression\":{\"Kind\":\"VariableRef\",\"Variable\":{\"Name\":\"v0\",\"UniqueId\":2}}}]},\"SourceExpression\":{\"Kind\":\"Aggregate\",\"Aggregate\":{\"Kind\":\"Builtin\",\"OperatorKind\":\"Sum\"},\"SourceExpression\":{\"Kind\":\"Input\",\"Name\":\"root\"}}}}"; @@ -627,7 +628,8 @@ public async Task TestBinaryDistributionPlanParsingFromStream() memoryStream, Documents.ResourceType.Document, out CosmosArray documents, - out CosmosObject distributionPlan); + out CosmosObject distributionPlan, + out bool? streaming); if (distributionPlan.TryGetValue("backendDistributionPlan", out CosmosElement backendDistributionPlan) && distributionPlan.TryGetValue("clientDistributionPlan", out CosmosElement clientDistributionPlan)) @@ -649,7 +651,7 @@ private static async Task ExecuteGoneExceptionOnODEPipeline(bool isMultiPa QueryRequestOptions queryRequestOptions = GetQueryRequestOptions(enableOptimisticDirectExecution: true); (MergeTestUtil mergeTest, IQueryPipelineStage queryPipelineStage) = await CreateFallbackPipelineTestInfrastructure(numItems, isFailedFallbackPipelineTest: false, isMultiPartition, queryRequestOptions); - while (await queryPipelineStage.MoveNextAsync(NoOpTrace.Singleton)) + while (await queryPipelineStage.MoveNextAsync(NoOpTrace.Singleton, cancellationToken: default)) { if (mergeTest.MoveNextCounter == 1) { @@ -682,7 +684,7 @@ private static async Task TestHandlingOfFailedFallbackPipeline(bool isMult QueryRequestOptions queryRequestOptions = GetQueryRequestOptions(enableOptimisticDirectExecution: true); (MergeTestUtil mergeTest, IQueryPipelineStage queryPipelineStage) = await CreateFallbackPipelineTestInfrastructure(numItems, isFailedFallbackPipelineTest: true, isMultiPartition, queryRequestOptions); - while (await queryPipelineStage.MoveNextAsync(NoOpTrace.Singleton)) + while (await queryPipelineStage.MoveNextAsync(NoOpTrace.Singleton, cancellationToken: default)) { TryCatch tryGetPage = queryPipelineStage.Current; if (tryGetPage.Failed) @@ -729,9 +731,9 @@ private static string RemoveWhitespace(string jsonString) failureConfigs: new FlakyDocumentContainer.FailureConfigs( inject429s: false, injectEmptyPages: false, - shouldReturnFailure: mergeTest.ShouldReturnFailure)); + shouldReturnFailure: () => Task.FromResult(mergeTest.ShouldReturnFailure()))); - IQueryPipelineStage queryPipelineStage = await GetOdePipelineAsync(input, inMemoryCollection, queryRequestOptions); + IQueryPipelineStage queryPipelineStage = CreateOdePipeline(input, inMemoryCollection, queryRequestOptions); return (mergeTest, queryPipelineStage); } @@ -742,9 +744,9 @@ private async Task GetPipelineAndDrainAsync(OptimisticDirectExecutionTestIn List documents = new List(); QueryRequestOptions queryRequestOptions = GetQueryRequestOptions(enableOptimisticDirectExecution); DocumentContainer inMemoryCollection = await CreateDocumentContainerAsync(numItems, multiPartition: isMultiPartition, requiresDist: requiresDist); - IQueryPipelineStage queryPipelineStage = await GetOdePipelineAsync(input, inMemoryCollection, queryRequestOptions, clientDisableOde); + IQueryPipelineStage queryPipelineStage = CreateOdePipeline(input, inMemoryCollection, queryRequestOptions, clientDisableOde); - while (await queryPipelineStage.MoveNextAsync(NoOpTrace.Singleton)) + while (await queryPipelineStage.MoveNextAsync(NoOpTrace.Singleton, cancellationToken: 
default)) { TryCatch tryGetPage = queryPipelineStage.Current; tryGetPage.ThrowIfFailed(); @@ -775,7 +777,7 @@ private async Task GetPipelineAndDrainAsync(OptimisticDirectExecutionTestIn partitionKeyValue: input.PartitionKeyValue, continuationToken: tryGetPage.Result.State.Value); - queryPipelineStage = await GetOdePipelineAsync(input, inMemoryCollection, queryRequestOptions); + queryPipelineStage = CreateOdePipeline(input, inMemoryCollection, queryRequestOptions); } continuationTokenCount++; @@ -803,7 +805,7 @@ internal static Tuple Get return Tuple.Create(partitionedQueryExecutionInfo, queryPartitionProvider); } - private static async Task GetOdePipelineAsync(OptimisticDirectExecutionTestInput input, DocumentContainer documentContainer, QueryRequestOptions queryRequestOptions, bool clientDisableOde = false) + private static IQueryPipelineStage CreateOdePipeline(OptimisticDirectExecutionTestInput input, DocumentContainer documentContainer, QueryRequestOptions queryRequestOptions, bool clientDisableOde = false) { (CosmosQueryExecutionContextFactory.InputParameters inputParameters, CosmosQueryContextCore cosmosQueryContextCore) = CreateInputParamsAndQueryContext(input, queryRequestOptions, clientDisableOde); IQueryPipelineStage queryPipelineStage = CosmosQueryExecutionContextFactory.Create( @@ -953,7 +955,7 @@ public override OptimisticDirectExecutionTestOutput ExecuteTest(OptimisticDirect inputParameters, NoOpTrace.Singleton); - bool result = queryPipelineStage.MoveNextAsync(NoOpTrace.Singleton).AsTask().GetAwaiter().GetResult(); + bool result = queryPipelineStage.MoveNextAsync(NoOpTrace.Singleton, cancellationToken: default).AsTask().GetAwaiter().GetResult(); if (input.ExpectedOptimisticDirectExecution) { @@ -1077,16 +1079,16 @@ public override async Task> MonadicQueryAsync( queryPage.Result.Result.Documents, requestCharge: 42, activityId: Guid.NewGuid().ToString(), - responseLengthInBytes: 1337, cosmosQueryExecutionInfo: default, distributionPlanSpec: default, disallowContinuationTokenMessage: default, additionalHeaders: additionalHeaders.ToImmutable(), - state: queryPage.Result.Result.State))); + state: queryPage.Result.Result.State, + streaming: default))); } } - private class MergeTestUtil + private sealed class MergeTestUtil { public int MoveNextCounter { get; private set; } @@ -1101,7 +1103,7 @@ public MergeTestUtil(bool isFailedFallbackPipelineTest) this.IsFailedFallbackPipelineTest = isFailedFallbackPipelineTest; } - public async Task ShouldReturnFailure() + public Exception ShouldReturnFailure() { this.MoveNextCounter++; if (this.MoveNextCounter == 2 && !this.GoneExceptionCreated) @@ -1269,9 +1271,9 @@ public override Task GetCachedContainerQueryProperties Cosmos.GeospatialType.Geometry)); } - public override async Task GetClientDisableOptimisticDirectExecutionAsync() + public override Task GetClientDisableOptimisticDirectExecutionAsync() { - return this.queryPartitionProvider.ClientDisableOptimisticDirectExecution; + return Task.FromResult(this.queryPartitionProvider.ClientDisableOptimisticDirectExecution); } public override Task> GetTargetPartitionKeyRangeByFeedRangeAsync(string resourceLink, string collectionResourceId, PartitionKeyDefinition partitionKeyDefinition, FeedRangeInternal feedRangeInternal, bool forceRefresh, ITrace trace) @@ -1294,14 +1296,14 @@ public override Task> TryGetOverlappingRangesAs throw new NotImplementedException(); } - public override async Task> TryGetPartitionedQueryExecutionInfoAsync(SqlQuerySpec sqlQuerySpec, ResourceType resourceType, 
PartitionKeyDefinition partitionKeyDefinition, bool requireFormattableOrderByQuery, bool isContinuationExpected, bool allowNonValueAggregateQuery, bool hasLogicalPartitionKey, bool allowDCount, bool useSystemPrefix, Cosmos.GeospatialType geospatialType, CancellationToken cancellationToken) + public override Task> TryGetPartitionedQueryExecutionInfoAsync(SqlQuerySpec sqlQuerySpec, ResourceType resourceType, PartitionKeyDefinition partitionKeyDefinition, bool requireFormattableOrderByQuery, bool isContinuationExpected, bool allowNonValueAggregateQuery, bool hasLogicalPartitionKey, bool allowDCount, bool useSystemPrefix, Cosmos.GeospatialType geospatialType, CancellationToken cancellationToken) { - CosmosSerializerCore serializerCore = new(); - using StreamReader streamReader = new(serializerCore.ToStreamSqlQuerySpec(sqlQuerySpec, Documents.ResourceType.Document)); + CosmosSerializerCore serializerCore = new CosmosSerializerCore(); + using StreamReader streamReader = new StreamReader(serializerCore.ToStreamSqlQuerySpec(sqlQuerySpec, Documents.ResourceType.Document)); string sqlQuerySpecJsonString = streamReader.ReadToEnd(); (PartitionedQueryExecutionInfo partitionedQueryExecutionInfo, QueryPartitionProvider queryPartitionProvider) = OptimisticDirectExecutionQueryBaselineTests.GetPartitionedQueryExecutionInfoAndPartitionProvider(sqlQuerySpecJsonString, partitionKeyDefinition); - return TryCatch.FromResult(partitionedQueryExecutionInfo); + return Task.FromResult(TryCatch.FromResult(partitionedQueryExecutionInfo)); } } } diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/OrderByQueryPartitionRangePageAsyncEnumeratorTests.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/OrderByQueryPartitionRangePageAsyncEnumeratorTests.cs index b08f01e317..5c96a8ff77 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/OrderByQueryPartitionRangePageAsyncEnumeratorTests.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/OrderByQueryPartitionRangePageAsyncEnumeratorTests.cs @@ -57,15 +57,16 @@ protected override Task>> CreateEnum Assert.AreEqual(1, ranges.Count); IAsyncEnumerator> enumerator = new TracingAsyncEnumerator>( - new OrderByQueryPartitionRangePageAsyncEnumerator( + OrderByQueryPartitionRangePageAsyncEnumerator.Create( queryDataSource: documentContainer, sqlQuerySpec: new Cosmos.Query.Core.SqlQuerySpec("SELECT * FROM c"), feedRangeState: new FeedRangeState(ranges[0], state), partitionKey: null, queryPaginationOptions: new QueryPaginationOptions(pageSizeHint: 10), - filter: "filter", - cancellationToken: cancellationToken), - NoOpTrace.Singleton); + filter: "filter", + PrefetchPolicy.PrefetchSinglePage), + NoOpTrace.Singleton, + cancellationToken); return Task.FromResult(enumerator); } diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/AggregateQueryPipelineStageTests.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/AggregateQueryPipelineStageTests.cs index 72e08e606f..61dba34d02 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/AggregateQueryPipelineStageTests.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/AggregateQueryPipelineStageTests.cs @@ -111,8 +111,7 @@ private static async Task> CreateAndDrain( orderedAliases: orderedAliases, hasSelectValue: hasSelectValue, continuationToken: continuationToken, - cancellationToken: default, - monadicCreatePipelineStage: (CosmosElement 
continuationToken, CancellationToken cancellationToken) => TryCatch.FromResult(source)); + monadicCreatePipelineStage: (CosmosElement continuationToken) => TryCatch.FromResult(source)); Assert.IsTrue(tryCreateAggregateQueryPipelineStage.Succeeded); IQueryPipelineStage aggregateQueryPipelineStage = tryCreateAggregateQueryPipelineStage.Result; diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/AggressivePrefetchPipelineTests.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/AggressivePrefetchPipelineTests.cs index 102e532477..2cc702b6bd 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/AggressivePrefetchPipelineTests.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/AggressivePrefetchPipelineTests.cs @@ -242,12 +242,12 @@ public override async Task> MonadicQueryAsync( documents: documents, requestCharge: 3.0, activityId: "E7980B1F-436E-44DF-B7A5-655C56D38648", - responseLengthInBytes: 48, cosmosQueryExecutionInfo: new Lazy(() => new CosmosQueryExecutionInfo(false, false)), distributionPlanSpec: default, disallowContinuationTokenMessage: null, additionalHeaders: null, - state: continuationToken); + state: continuationToken, + streaming: default); return TryCatch.FromResult(page); } diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/CrossPartitionRangePageAsyncEnumerable.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/CrossPartitionRangePageAsyncEnumerable.cs index 7b96480a5f..1bc2c1ec28 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/CrossPartitionRangePageAsyncEnumerable.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/CrossPartitionRangePageAsyncEnumerable.cs @@ -53,9 +53,9 @@ public IAsyncEnumerator>> GetAsyncEnu this.comparer, this.maxConcurrency, this.prefetchPolicy, - cancellationToken, this.state), - this.trace); + this.trace, + cancellationToken); } } } diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/DCountQueryPipelineStageTests.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/DCountQueryPipelineStageTests.cs index 89fce6f1e7..93dc5b9459 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/DCountQueryPipelineStageTests.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/DCountQueryPipelineStageTests.cs @@ -200,7 +200,7 @@ private static async Task> CreateAndDrainWithStateAsync( distinctQueryType: distinctQueryType, dcountAlias: dcountAlias); - if(!await stage.MoveNextAsync(NoOpTrace.Singleton)) + if(!await stage.MoveNextAsync(NoOpTrace.Singleton, cancellationToken: default)) { break; } @@ -222,22 +222,20 @@ private static IQueryPipelineStage Create( DistinctQueryType distinctQueryType, string dcountAlias) { - MonadicCreatePipelineStage source = (CosmosElement continuationToken, CancellationToken cancellationToken) => + MonadicCreatePipelineStage source = (CosmosElement continuationToken) => TryCatch.FromResult(MockQueryPipelineStage.Create(pages, continuationToken)); - MonadicCreatePipelineStage createDistinctQueryPipelineStage = (CosmosElement continuationToken, CancellationToken cancellationToken) => + MonadicCreatePipelineStage createDistinctQueryPipelineStage = (CosmosElement continuationToken) => DistinctQueryPipelineStage.MonadicCreate( executionEnvironment: executionEnvironment, 
requestContinuation: continuationToken, distinctQueryType: distinctQueryType, - cancellationToken: cancellationToken, monadicCreatePipelineStage: source); TryCatch tryCreateDCountQueryPipelineStage = DCountQueryPipelineStage.MonadicCreate( executionEnvironment: executionEnvironment, continuationToken: requestContinuationToken, info: new DCountInfo { DCountAlias = dcountAlias }, - cancellationToken: default, monadicCreatePipelineStage: createDistinctQueryPipelineStage); Assert.IsTrue(tryCreateDCountQueryPipelineStage.Succeeded); diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/DistinctQueryPipelineStageTests.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/DistinctQueryPipelineStageTests.cs index 9b9ade94c6..f71559e11e 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/DistinctQueryPipelineStageTests.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/DistinctQueryPipelineStageTests.cs @@ -220,8 +220,7 @@ private static async Task> CreateAndDrainAsync( executionEnvironment: executionEnvironment, requestContinuation: continuationToken, distinctQueryType: distinctQueryType, - cancellationToken: default, - monadicCreatePipelineStage: (CosmosElement continuationToken, CancellationToken cancellationToken) => TryCatch.FromResult(source)); + monadicCreatePipelineStage: (CosmosElement continuationToken) => TryCatch.FromResult(source)); Assert.IsTrue(tryCreateDistinctQueryPipelineStage.Succeeded); IQueryPipelineStage distinctQueryPipelineStage = tryCreateDistinctQueryPipelineStage.Result; diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/EnumerableStage.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/EnumerableStage.cs index 7d45f5ff38..8c6a4236e1 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/EnumerableStage.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/EnumerableStage.cs @@ -28,7 +28,7 @@ public EnumerableStage(IQueryPipelineStage stage, ITrace trace) public IAsyncEnumerator> GetAsyncEnumerator(CancellationToken cancellationToken = default) { - return new TracingAsyncEnumerator>(this.stage, this.trace); + return new TracingAsyncEnumerator>(this.stage, this.trace, cancellationToken); } } } diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/FactoryTests.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/FactoryTests.cs index 21f87f2014..54e5b99899 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/FactoryTests.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/FactoryTests.cs @@ -31,7 +31,6 @@ public void TestCreate() queryInfo: new QueryInfo() { }, queryPaginationOptions: new QueryPaginationOptions(pageSizeHint: 10), maxConcurrency: 10, - requestCancellationToken: default, requestContinuationToken: default); ; Assert.IsTrue(monadicCreatePipeline.Succeeded); } diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/FullPipelineTests.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/FullPipelineTests.cs index 97598ea59f..68b5b7d9cd 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/FullPipelineTests.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/FullPipelineTests.cs @@ -66,7 +66,7 
@@ public async Task TestMerge() List elements = new List(); int iteration = 0; - while (await pipelineStage.MoveNextAsync(NoOpTrace.Singleton)) + while (await pipelineStage.MoveNextAsync(NoOpTrace.Singleton, cancellationToken: default)) { TryCatch tryGetQueryPage = pipelineStage.Current; tryGetQueryPage.ThrowIfFailed(); @@ -245,7 +245,7 @@ public async Task Tracing() int numTraces = (await documentContainer.GetFeedRangesAsync(NoOpTrace.Singleton, default)).Count; using (rootTrace = Trace.GetRootTrace("Cross Partition Query")) { - while (await pipelineStage.MoveNextAsync(rootTrace)) + while (await pipelineStage.MoveNextAsync(rootTrace, cancellationToken: default)) { TryCatch tryGetQueryPage = pipelineStage.Current; tryGetQueryPage.ThrowIfFailed(); @@ -308,7 +308,7 @@ private async Task TestPageSizeAsync(string query, int expectedPageSize, int exp NoOpTrace.Singleton); List elements = new List(); - while (await queryPipelineStage.MoveNextAsync(NoOpTrace.Singleton)) + while (await queryPipelineStage.MoveNextAsync(NoOpTrace.Singleton, cancellationToken: default)) { TryCatch tryGetQueryPage = queryPipelineStage.Current; tryGetQueryPage.ThrowIfFailed(); @@ -480,7 +480,7 @@ internal static async Task> DrainWithoutStateAsync(string qu IQueryPipelineStage pipelineStage = await CreatePipelineAsync(documentContainer, query, pageSize); List elements = new List(); - while (await pipelineStage.MoveNextAsync(NoOpTrace.Singleton)) + while (await pipelineStage.MoveNextAsync(NoOpTrace.Singleton, cancellationToken: default)) { TryCatch tryGetQueryPage = pipelineStage.Current; tryGetQueryPage.ThrowIfFailed(); @@ -501,7 +501,7 @@ private static async Task> DrainWithStateAsync(string query, { pipelineStage = await CreatePipelineAsync(documentContainer, query, pageSize, state); - if (!await pipelineStage.MoveNextAsync(NoOpTrace.Singleton)) + if (!await pipelineStage.MoveNextAsync(NoOpTrace.Singleton, cancellationToken: default)) { break; } @@ -591,7 +591,6 @@ private static async Task CreatePipelineAsync( GetQueryPlan(query), queryPaginationOptions: new QueryPaginationOptions(pageSizeHint: pageSize), maxConcurrency: 10, - requestCancellationToken: default, requestContinuationToken: state); tryCreatePipeline.ThrowIfFailed(); diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/GroupByQueryPipelineStageTests.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/GroupByQueryPipelineStageTests.cs index c9197c48fd..da5a5a7648 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/GroupByQueryPipelineStageTests.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/GroupByQueryPipelineStageTests.cs @@ -58,8 +58,7 @@ private static async Task> CreateAndDrainAsync( TryCatch tryCreateGroupByStage = GroupByQueryPipelineStage.MonadicCreate( executionEnvironment: executionEnvironment, continuationToken: continuationToken, - cancellationToken: default, - monadicCreatePipelineStage: (CosmosElement continuationToken, CancellationToken cancellationToken) => TryCatch.FromResult(source), + monadicCreatePipelineStage: (CosmosElement continuationToken) => TryCatch.FromResult(source), aggregates: new AggregateOperator[] { }, groupByAliasToAggregateType: groupByAliasToAggregateType, orderedAliases: orderedAliases, diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/MockQueryPipelineStage.cs 
b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/MockQueryPipelineStage.cs index 282b025031..389022f8af 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/MockQueryPipelineStage.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/MockQueryPipelineStage.cs @@ -13,7 +13,6 @@ namespace Microsoft.Azure.Cosmos.Tests.Query.Pipeline using Microsoft.Azure.Cosmos.Query.Core.Monads; using Microsoft.Azure.Cosmos.Query.Core.Pipeline; using Microsoft.Azure.Cosmos.Query.Core.Pipeline.Pagination; - using Microsoft.Azure.Cosmos.Tests.Query.OfflineEngineTests; using Microsoft.Azure.Cosmos.Tracing; internal sealed class MockQueryPipelineStage : QueryPipelineStageBase @@ -23,7 +22,7 @@ internal sealed class MockQueryPipelineStage : QueryPipelineStageBase public long PageIndex { get; private set; } public MockQueryPipelineStage(IReadOnlyList> pages) - : base(EmptyQueryPipelineStage.Singleton, cancellationToken: default) + : base(EmptyQueryPipelineStage.Singleton) { this.pages = pages ?? throw new ArgumentNullException(nameof(pages)); } @@ -43,7 +42,7 @@ public static MockQueryPipelineStage Create( return stage; } - public override ValueTask MoveNextAsync(ITrace trace) + public override ValueTask MoveNextAsync(ITrace trace, CancellationToken cancellationToken) { if (this.PageIndex == this.pages.Count) { @@ -57,12 +56,12 @@ public override ValueTask MoveNextAsync(ITrace trace) documents: documents, requestCharge: default, activityId: Guid.NewGuid().ToString(), - responseLengthInBytes: default, cosmosQueryExecutionInfo: default, distributionPlanSpec: default, disallowContinuationTokenMessage: default, additionalHeaders: default, - state: state); + state: state, + streaming: default); this.Current = TryCatch.FromResult(page); return new ValueTask(true); } diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/NonStreamingOrderByQueryTests.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/NonStreamingOrderByQueryTests.cs new file mode 100644 index 0000000000..daac63b9b1 --- /dev/null +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/NonStreamingOrderByQueryTests.cs @@ -0,0 +1,1004 @@ +//------------------------------------------------------------ +// Copyright (c) Microsoft Corporation. All rights reserved. 
+//------------------------------------------------------------ + +namespace Microsoft.Azure.Cosmos.Tests.Query.Pipeline +{ + using Microsoft.Azure.Cosmos.ChangeFeed.Pagination; + using Microsoft.Azure.Cosmos.CosmosElements; + using Microsoft.Azure.Cosmos.Pagination; + using Microsoft.Azure.Cosmos.Query.Core.Monads; + using Microsoft.Azure.Cosmos.Query.Core.Pipeline.CrossPartition.OrderBy; + using Microsoft.Azure.Cosmos.Query.Core.Pipeline.Pagination; + using Microsoft.Azure.Cosmos.Query.Core.Pipeline; + using Microsoft.Azure.Cosmos.Query.Core; + using Microsoft.Azure.Cosmos.ReadFeed.Pagination; + using Microsoft.Azure.Cosmos.Tests.Pagination; + using Microsoft.Azure.Cosmos.Tracing; + using Microsoft.Azure.Cosmos; + using Microsoft.VisualStudio.TestTools.UnitTesting; + using System.Collections.Generic; + using System.Diagnostics; + using System.Threading.Tasks; + using System.Threading; + using System; + using System.Linq; + using Microsoft.Azure.Cosmos.CosmosElements.Numbers; + + [TestClass] + public class NonStreamingOrderByQueryTests + { + private const int MaxConcurrency = 10; + + private const int DocumentCount = 420; + + private const int LeafPageCount = 100; + + private const int PageSize = 10; + + private const string ActivityId = "ActivityId"; + + private const int QueryCharge = 42; + + private const string CollectionRid = "1HNeAM-TiQY="; + + private const string RId = "_rid"; + + private const string OrderByItems = "orderByItems"; + + private const string Payload = "payload"; + + private const string Item = "item"; + + private const string Index = "index"; + + private const string IndexString = "indexString"; + + private static readonly int[] PageSizes = new [] { 1, 10, 100, DocumentCount }; + + [TestMethod] + public async Task InMemoryContainerParityTests() + { + IDocumentContainer documentContainer = await CreateDocumentContainerAsync(DocumentCount); + + IReadOnlyList idColumnAsc = new List + { + new OrderByColumn("c.id", SortOrder.Ascending) + }; + + IReadOnlyList idColumnDesc = new List + { + new OrderByColumn("c.id", SortOrder.Descending) + }; + + IReadOnlyList testCases = new List + { + MakeTest( + queryText: @" + SELECT c._rid AS _rid, [{""item"": c.id}] AS orderByItems, c AS payload + FROM c + WHERE {documentdb-formattableorderbyquery-filter} + ORDER BY c.id", + orderByColumns: idColumnAsc, + validate: result => Validate.IndexIsInOrder(result, propertyName: "id", DocumentCount, reversed: false)), + MakeTest( + queryText: @" + SELECT c._rid AS _rid, [{""item"": c.id}] AS orderByItems, c AS payload + FROM c + WHERE {documentdb-formattableorderbyquery-filter} + ORDER BY c.id DESC", + orderByColumns: idColumnDesc, + validate: result => Validate.IndexIsInOrder(result, propertyName: "id",DocumentCount, reversed: true)), + + // Empty result set + MakeTest( + queryText: @" + SELECT c._rid AS _rid, [{""item"": c.id}] AS orderByItems, c AS payload + FROM c + WHERE c.doesNotExist = true AND {documentdb-formattableorderbyquery-filter} + ORDER BY c.id", + orderByColumns: idColumnAsc, + validate: result => result.Count == 0), + MakeTest( + queryText: @" + SELECT c._rid AS _rid, [{""item"": c.id}] AS orderByItems, c AS payload + FROM c + WHERE c.doesNotExist = true AND {documentdb-formattableorderbyquery-filter} + ORDER BY c.id DESC", + orderByColumns: idColumnDesc, + validate: result => result.Count == 0), + }; + + await RunParityTests( + documentContainer, + new NonStreamingDocumentContainer(documentContainer), + await documentContainer.GetFeedRangesAsync(NoOpTrace.Singleton, 
default), + testCases); + } + + [TestMethod] + public async Task ShufflingContainerParityTests() + { + static bool IndexIsInOrder(IReadOnlyList result, bool reversed) + { + return Validate.IndexIsInOrder(result, propertyName: Index, LeafPageCount * PageSize, reversed); + } + + IReadOnlyList testCases = new List + { + MakeParityTest( + feedMode: PartitionedFeedMode.NonStreaming, + documentCreationMode: DocumentCreationMode.SingleItem, + queryText: @" + SELECT c._rid AS _rid, [{""item"": c.index}] AS orderByItems, c AS payload + FROM c + WHERE {documentdb-formattableorderbyquery-filter} + ORDER BY c.index", + orderByColumns: new List + { + new OrderByColumn($"c.{Index}", SortOrder.Ascending) + }, + validate: result => IndexIsInOrder(result, reversed: false)), + MakeParityTest( + feedMode: PartitionedFeedMode.NonStreamingReversed, + documentCreationMode: DocumentCreationMode.SingleItem, + queryText: @" + SELECT c._rid AS _rid, [{""item"": c.index}] AS orderByItems, c AS payload + FROM c + WHERE {documentdb-formattableorderbyquery-filter} + ORDER BY c.index DESC", + orderByColumns: new List + { + new OrderByColumn($"c.{Index}", SortOrder.Descending) + }, + validate: result => IndexIsInOrder(result, reversed: true)), + MakeParityTest( + feedMode: PartitionedFeedMode.NonStreaming, + documentCreationMode: DocumentCreationMode.MultiItem, + queryText: @" + SELECT c._rid AS _rid, [{""item"": c.index}, {""item"": c.indexString}] AS orderByItems, c AS payload + FROM c + WHERE {documentdb-formattableorderbyquery-filter} + ORDER BY c.index, c.indexString", + orderByColumns: new List + { + new OrderByColumn($"c.{Index}", SortOrder.Ascending), + new OrderByColumn($"c.{IndexString}", SortOrder.Ascending) + }, + validate: result => IndexIsInOrder(result, reversed: false)), + MakeParityTest( + feedMode: PartitionedFeedMode.NonStreamingReversed, + documentCreationMode: DocumentCreationMode.MultiItem, + queryText: @" + SELECT c._rid AS _rid, [{""item"": c.index}, {""item"": c.indexString}] AS orderByItems, c AS payload + FROM c + WHERE {documentdb-formattableorderbyquery-filter} + ORDER BY c.index DESC, c.indexString DESC", + orderByColumns: new List + { + new OrderByColumn($"c.{Index}", SortOrder.Descending), + new OrderByColumn($"c.{IndexString}", SortOrder.Descending) + }, + validate: result => IndexIsInOrder(result, reversed: true)), + MakeParityTest( + feedMode: PartitionedFeedMode.NonStreaming, + documentCreationMode: DocumentCreationMode.MultiItemSwapped, + queryText: @" + SELECT c._rid AS _rid, [{""item"": c.indexString}, {""item"": c.index}] AS orderByItems, c AS payload + FROM c + WHERE {documentdb-formattableorderbyquery-filter} + ORDER BY c.indexString, c.index", + orderByColumns: new List + { + new OrderByColumn($"c.{IndexString}", SortOrder.Ascending), + new OrderByColumn($"c.{Index}", SortOrder.Ascending), + }, + validate: result => IndexIsInOrder(result, reversed: false)), + MakeParityTest( + feedMode: PartitionedFeedMode.NonStreamingReversed, + documentCreationMode: DocumentCreationMode.MultiItemSwapped, + queryText: @" + SELECT c._rid AS _rid, [{""item"": c.indexString}, {""item"": c.index}] AS orderByItems, c AS payload + FROM c + WHERE {documentdb-formattableorderbyquery-filter} + ORDER BY c.indexString DESC, c.index DESC", + orderByColumns: new List + { + new OrderByColumn($"c.{IndexString}", SortOrder.Descending), + new OrderByColumn($"c.{Index}", SortOrder.Descending), + }, + validate: result => IndexIsInOrder(result, reversed: true)), + }; + + await RunParityTests(testCases); + } + + 
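To make the intent of these parity tests concrete: each one drains the same ORDER BY query twice, once against the streaming container and once against the shuffled, non-streaming one, and requires the two result sequences to match. Below is a minimal, self-contained sketch of that drain-and-compare loop; the DrainPageAsync delegate and plain string items are hypothetical stand-ins for the pipeline stage and CosmosElement documents used in these tests.

    using System;
    using System.Collections.Generic;
    using System.Linq;
    using System.Threading.Tasks;

    internal static class ParitySketch
    {
        // One call returns the next page of results; null signals that the pipeline is drained.
        public delegate Task<IReadOnlyList<string>> DrainPageAsync();

        public static async Task AssertParityAsync(DrainPageAsync streaming, DrainPageAsync nonStreaming)
        {
            List<string> streamingResults = await DrainAsync(streaming);
            List<string> nonStreamingResults = await DrainAsync(nonStreaming);

            // Both pipelines must yield the same documents in the same order,
            // no matter how the backend pages arrived.
            if (!streamingResults.SequenceEqual(nonStreamingResults))
            {
                throw new InvalidOperationException("Streaming and non-streaming results diverged.");
            }
        }

        private static async Task<List<string>> DrainAsync(DrainPageAsync nextPage)
        {
            List<string> results = new List<string>();
            IReadOnlyList<string> page;
            while ((page = await nextPage()) != null)
            {
                results.AddRange(page);
            }

            return results;
        }
    }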
private static async Task RunParityTests( + IDocumentContainer documentContainer, + IDocumentContainer nonStreamingDocumentContainer, + IReadOnlyList ranges, + IReadOnlyList testCases) + { + foreach (TestCase testCase in testCases) + { + foreach (int pageSize in testCase.PageSizes) + { + IReadOnlyList nonStreamingResult = await CreateAndRunPipelineStage( + documentContainer: nonStreamingDocumentContainer, + ranges: ranges, + queryText: testCase.QueryText, + orderByColumns: testCase.OrderByColumns, + pageSize: pageSize); + + IReadOnlyList streamingResult = await CreateAndRunPipelineStage( + documentContainer: documentContainer, + ranges: ranges, + queryText: testCase.QueryText, + orderByColumns: testCase.OrderByColumns, + pageSize: pageSize); + + if (!streamingResult.SequenceEqual(nonStreamingResult)) + { + Assert.Fail($"Results mismatch for query:\n{testCase.QueryText}\npageSize: {pageSize}"); + } + + if (!testCase.Validate(nonStreamingResult)) + { + Assert.Fail($"Could not validate result for query:\n{testCase.QueryText}\npageSize: {pageSize}"); + } + } + } + } + + private static async Task> CreateAndRunPipelineStage( + IDocumentContainer documentContainer, + IReadOnlyList ranges, + string queryText, + IReadOnlyList orderByColumns, + int pageSize) + { + TryCatch pipelineStage = OrderByCrossPartitionQueryPipelineStage.MonadicCreate( + documentContainer: documentContainer, + sqlQuerySpec: new SqlQuerySpec(queryText), + targetRanges: ranges, + partitionKey: null, + orderByColumns: orderByColumns, + queryPaginationOptions: new QueryPaginationOptions(pageSizeHint: pageSize), + maxConcurrency: MaxConcurrency, + continuationToken: null); + + Assert.IsTrue(pipelineStage.Succeeded); + + IQueryPipelineStage stage = pipelineStage.Result; + List documents = new List(); + while (await stage.MoveNextAsync(NoOpTrace.Singleton, default)) + { + Assert.IsTrue(stage.Current.Succeeded); + Assert.IsTrue(stage.Current.Result.Documents.Count <= pageSize); + DebugTraceHelpers.TracePipelineStagePage(stage.Current.Result); + documents.AddRange(stage.Current.Result.Documents); + } + + return documents; + } + + private static async Task RunParityTests(IReadOnlyList testCases) + { + foreach (ParityTestCase testCase in testCases) + { + IReadOnlyList ranges = new List + { + new FeedRangeEpk(new Documents.Routing.Range(string.Empty, "AA", true, false)), + new FeedRangeEpk(new Documents.Routing.Range("AA", "BB", true, false)), + new FeedRangeEpk(new Documents.Routing.Range("BB", "CC", true, false)), + new FeedRangeEpk(new Documents.Routing.Range("CC", "DD", true, false)), + new FeedRangeEpk(new Documents.Routing.Range("DD", "EE", true, false)), + new FeedRangeEpk(new Documents.Routing.Range("EE", "FF", true, false)), + }; + + IDocumentContainer nonStreamingDocumentContainer = MockDocumentContainer.Create(ranges, testCase.FeedMode, testCase.DocumentCreationMode); + + IDocumentContainer streamingDocumentContainer = MockDocumentContainer.Create( + ranges, + testCase.FeedMode & PartitionedFeedMode.StreamingReversed, + testCase.DocumentCreationMode); + + foreach (int pageSize in testCase.PageSizes) + { + DebugTraceHelpers.TraceNonStreamingPipelineStarting(); + IReadOnlyList nonStreamingResult = await CreateAndRunPipelineStage( + documentContainer: nonStreamingDocumentContainer, + ranges: ranges, + queryText: testCase.QueryText, + orderByColumns: testCase.OrderByColumns, + pageSize: pageSize); + + DebugTraceHelpers.TraceStreamingPipelineStarting(); + IReadOnlyList streamingResult = await CreateAndRunPipelineStage( + 
documentContainer: streamingDocumentContainer, + ranges: ranges, + queryText: testCase.QueryText, + orderByColumns: testCase.OrderByColumns, + pageSize: pageSize); + + if (!streamingResult.SequenceEqual(nonStreamingResult)) + { + Assert.Fail($"Results mismatch for query:\n{testCase.QueryText}\npageSize: {pageSize}"); + } + } + } + } + + private static TestCase MakeTest(string queryText, IReadOnlyList orderByColumns, Func, bool> validate) + { + return MakeTest(queryText, orderByColumns, PageSizes, validate); + } + + private static TestCase MakeTest( + string queryText, + IReadOnlyList orderByColumns, + int[] pageSizes, + Func, bool> validate) + { + return new TestCase(queryText, orderByColumns, pageSizes, validate); + } + + private class TestCase + { + public string QueryText { get; } + + public IReadOnlyList OrderByColumns { get; } + + public int[] PageSizes { get; } + + public Func, bool> Validate { get; } + + public TestCase( + string queryText, + IReadOnlyList orderByColumns, + int[] pageSizes, + Func, bool> validate) + { + this.QueryText = queryText; + this.OrderByColumns = orderByColumns; + this.PageSizes = pageSizes; + this.Validate = validate; + } + } + + private static ParityTestCase MakeParityTest( + PartitionedFeedMode feedMode, + DocumentCreationMode documentCreationMode, + string queryText, + IReadOnlyList orderByColumns, + Func, bool> validate) + { + return MakeParityTest(feedMode, documentCreationMode, queryText, orderByColumns, PageSizes, validate); + } + + private static ParityTestCase MakeParityTest( + PartitionedFeedMode feedMode, + DocumentCreationMode documentCreationMode, + string queryText, + IReadOnlyList orderByColumns, + int[] pageSizes, + Func, bool> validate) + { + return new ParityTestCase(feedMode, documentCreationMode, queryText, orderByColumns, pageSizes, validate); + } + + private sealed class ParityTestCase : TestCase + { + public PartitionedFeedMode FeedMode { get; } + + public DocumentCreationMode DocumentCreationMode { get; } + + public ParityTestCase( + PartitionedFeedMode feedMode, + DocumentCreationMode documentCreationMode, + string queryText, + IReadOnlyList orderByColumns, + int[] pageSizes, + Func, bool> validate) + : base(queryText, orderByColumns, pageSizes, validate) + { + this.FeedMode = feedMode; + this.DocumentCreationMode = documentCreationMode; + } + } + + private static class Validate + { + public static bool IndexIsInOrder(IReadOnlyList documents, string propertyName, int count, bool reversed) + { + List expected = Enumerable + .Range(0, count) + .ToList(); + + if (reversed) + { + expected.Reverse(); + } + + IEnumerable actual = documents + .Cast() + .Select(x => x[propertyName]) + .Cast() + .Select(x => (int)Number64.ToLong(x.Value)); + + return expected.SequenceEqual(actual); + } + } + + private sealed class NonStreamingDocumentContainer : IDocumentContainer + { + private readonly IDocumentContainer inner; + + public NonStreamingDocumentContainer(IDocumentContainer inner) + { + this.inner = inner ?? 
throw new ArgumentNullException(nameof(inner)); + } + + public Task ChangeFeedAsync( + FeedRangeState feedRangeState, + ChangeFeedPaginationOptions changeFeedPaginationOptions, + ITrace trace, + CancellationToken cancellationToken) + { + return this.inner.ChangeFeedAsync(feedRangeState, changeFeedPaginationOptions, trace, cancellationToken); + } + + public Task CreateItemAsync(CosmosObject payload, CancellationToken cancellationToken) + { + return this.inner.CreateItemAsync(payload, cancellationToken); + } + + public Task> GetChildRangeAsync( + FeedRangeInternal feedRange, + ITrace trace, + CancellationToken cancellationToken) + { + return this.inner.GetChildRangeAsync(feedRange, trace, cancellationToken); + } + + public Task> GetFeedRangesAsync(ITrace trace, CancellationToken cancellationToken) + { + return this.inner.GetFeedRangesAsync(trace, cancellationToken); + } + + public Task GetResourceIdentifierAsync(ITrace trace, CancellationToken cancellationToken) + { + return this.inner.GetResourceIdentifierAsync(trace, cancellationToken); + } + + public Task MergeAsync(FeedRangeInternal feedRange1, FeedRangeInternal feedRange2, CancellationToken cancellationToken) + { + return this.inner.MergeAsync(feedRange1, feedRange2, cancellationToken); + } + + public Task> MonadicChangeFeedAsync( + FeedRangeState feedRangeState, + ChangeFeedPaginationOptions changeFeedPaginationOptions, + ITrace trace, + CancellationToken cancellationToken) + { + return this.inner.MonadicChangeFeedAsync(feedRangeState, changeFeedPaginationOptions, trace, cancellationToken); + } + + public Task> MonadicCreateItemAsync(CosmosObject payload, CancellationToken cancellationToken) + { + return this.inner.MonadicCreateItemAsync(payload, cancellationToken); + } + + public Task>> MonadicGetChildRangeAsync( + FeedRangeInternal feedRange, + ITrace trace, + CancellationToken cancellationToken) + { + return this.inner.MonadicGetChildRangeAsync(feedRange, trace, cancellationToken); + } + + public Task>> MonadicGetFeedRangesAsync(ITrace trace, CancellationToken cancellationToken) + { + return this.inner.MonadicGetFeedRangesAsync(trace, cancellationToken); + } + + public Task> MonadicGetResourceIdentifierAsync(ITrace trace, CancellationToken cancellationToken) + { + return this.inner.MonadicGetResourceIdentifierAsync(trace, cancellationToken); + } + + public Task MonadicMergeAsync( + FeedRangeInternal feedRange1, + FeedRangeInternal feedRange2, + CancellationToken cancellationToken) + { + return this.inner.MonadicMergeAsync(feedRange1, feedRange2, cancellationToken); + } + + public async Task> MonadicQueryAsync( + SqlQuerySpec sqlQuerySpec, + FeedRangeState feedRangeState, + QueryPaginationOptions queryPaginationOptions, + ITrace trace, + CancellationToken cancellationToken) + { + TryCatch queryPage = await this.inner.MonadicQueryAsync(sqlQuerySpec, feedRangeState, queryPaginationOptions, trace, cancellationToken); + + if (queryPage.Failed) + { + return queryPage; + } + + QueryPage page = queryPage.Result; + DebugTraceHelpers.TraceBackendResponse(page); + + return TryCatch.FromResult(new QueryPage( + page.Documents, + page.RequestCharge, + page.ActivityId, + page.CosmosQueryExecutionInfo, + page.DistributionPlanSpec, + page.DisallowContinuationTokenMessage, + page.AdditionalHeaders, + page.State, + streaming: false)); + } + + public Task> MonadicReadFeedAsync( + FeedRangeState feedRangeState, + ReadFeedPaginationOptions readFeedPaginationOptions, + ITrace trace, + CancellationToken cancellationToken) + { + return 
this.inner.MonadicReadFeedAsync(feedRangeState, readFeedPaginationOptions, trace, cancellationToken); + } + + public Task> MonadicReadItemAsync( + CosmosElement partitionKey, + string identifer, + CancellationToken cancellationToken) + { + return this.inner.MonadicReadItemAsync(partitionKey, identifer, cancellationToken); + } + + public Task MonadicRefreshProviderAsync(ITrace trace, CancellationToken cancellationToken) + { + return this.inner.MonadicRefreshProviderAsync(trace, cancellationToken); + } + + public Task MonadicSplitAsync(FeedRangeInternal feedRange, CancellationToken cancellationToken) + { + return this.inner.MonadicSplitAsync(feedRange, cancellationToken); + } + + public async Task QueryAsync( + SqlQuerySpec sqlQuerySpec, + FeedRangeState feedRangeState, + QueryPaginationOptions queryPaginationOptions, + ITrace trace, + CancellationToken cancellationToken) + { + TryCatch queryPage = await this.MonadicQueryAsync( + sqlQuerySpec, + feedRangeState, + queryPaginationOptions, + trace, + cancellationToken); + queryPage.ThrowIfFailed(); + return queryPage.Result; + } + + public Task ReadFeedAsync( + FeedRangeState feedRangeState, + ReadFeedPaginationOptions readFeedPaginationOptions, + ITrace trace, + CancellationToken cancellationToken) + { + return this.inner.ReadFeedAsync(feedRangeState, readFeedPaginationOptions, trace, cancellationToken); + } + + public Task ReadItemAsync(CosmosElement partitionKey, string identifier, CancellationToken cancellationToken) + { + return this.inner.ReadItemAsync(partitionKey, identifier, cancellationToken); + } + + public Task RefreshProviderAsync(ITrace trace, CancellationToken cancellationToken) + { + return this.inner.RefreshProviderAsync(trace, cancellationToken); + } + + public Task SplitAsync(FeedRangeInternal feedRange, CancellationToken cancellationToken) + { + return this.inner.SplitAsync(feedRange, cancellationToken); + } + } + + private static class DebugTraceHelpers + { + private const bool Enabled = false; + + [Conditional("DEBUG")] + public static void TraceNonStreamingPipelineStarting() + { + if (Enabled) + { + System.Diagnostics.Trace.WriteLine("\nStarting non streaming pipeline\n"); + } + } + + [Conditional("DEBUG")] + public static void TraceStreamingPipelineStarting() + { + if (Enabled) + { + System.Diagnostics.Trace.WriteLine("\nStarting streaming pipeline\n"); + } + } + + [Conditional("DEBUG")] + public static void TracePipelineStagePage(QueryPage page) + { + if (Enabled) + { + System.Diagnostics.Trace.WriteLine("\nReceived next page from pipeline: "); + TracePage(page); + } + } + + [Conditional("DEBUG")] + public static void TraceBackendResponse(QueryPage page) + { + if (Enabled) + { + System.Diagnostics.Trace.WriteLine("Serving query from backend: "); + TracePage(page); + } + } + + [Conditional("DEBUG")] + public static void TracePage(QueryPage page) + { + if (Enabled) + { + System.Diagnostics.Trace.WriteLine("Page:"); + System.Diagnostics.Trace.WriteLine($" ActivityId: {page.ActivityId}"); + System.Diagnostics.Trace.WriteLine($" RequestCharge: {page.RequestCharge}"); + System.Diagnostics.Trace.WriteLine($" ActivityId: {page.ActivityId}"); + + System.Diagnostics.Trace.WriteLine($" AdditionalHeaders: "); + foreach (KeyValuePair header in page.AdditionalHeaders) + { + System.Diagnostics.Trace.WriteLine($" [{header.Key}] = {header.Value}"); + } + + System.Diagnostics.Trace.WriteLine($" Results:"); + foreach (CosmosElement result in page.Documents) + { + System.Diagnostics.Trace.WriteLine($" {result}"); + } + } + } + } + + 
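One detail of the DebugTraceHelpers class above worth calling out: every helper is gated twice, by [Conditional("DEBUG")], which makes the compiler omit call sites entirely when the DEBUG symbol is not defined, and by the private Enabled constant, which lets a developer flip tracing on locally without editing call sites. A minimal sketch of the same pattern in isolation (names here are illustrative only):

    using System.Diagnostics;

    internal static class TraceSketch
    {
        // Flip to true locally to see trace output in debug builds.
        private const bool Enabled = false;

        [Conditional("DEBUG")]
        public static void TraceLine(string message)
        {
            if (Enabled)
            {
                Trace.WriteLine(message);
            }
        }
    }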
private class MockDocumentContainer : IDocumentContainer + { + private readonly IReadOnlyDictionary>> pages; + + private readonly bool streaming; + + public static IDocumentContainer Create(IReadOnlyList feedRanges, PartitionedFeedMode feedMode, DocumentCreationMode documentCreationMode) + { + IReadOnlyDictionary>> pages = CreatePartitionedFeed( + feedRanges, + LeafPageCount, + PageSize, + feedMode, + (index) => CreateDocument(index, documentCreationMode)); + return new MockDocumentContainer(pages, !feedMode.HasFlag(PartitionedFeedMode.NonStreaming)); + } + + private MockDocumentContainer(IReadOnlyDictionary>> pages, bool streaming) + { + this.pages = pages ?? throw new ArgumentNullException(nameof(pages)); + this.streaming = streaming; + } + + public Task ChangeFeedAsync(FeedRangeState feedRangeState, ChangeFeedPaginationOptions changeFeedPaginationOptions, ITrace trace, CancellationToken cancellationToken) + { + throw new NotImplementedException(); + } + + public Task CreateItemAsync(CosmosObject payload, CancellationToken cancellationToken) + { + throw new NotImplementedException(); + } + + public Task> GetChildRangeAsync(FeedRangeInternal feedRange, ITrace trace, CancellationToken cancellationToken) + { + throw new NotImplementedException(); + } + + public Task> GetFeedRangesAsync(ITrace trace, CancellationToken cancellationToken) + { + return Task.FromResult(this.pages.Keys.Cast().ToList()); + } + + public Task GetResourceIdentifierAsync(ITrace trace, CancellationToken cancellationToken) + { + throw new NotImplementedException(); + } + + public Task MergeAsync(FeedRangeInternal feedRange1, FeedRangeInternal feedRange2, CancellationToken cancellationToken) + { + throw new NotImplementedException(); + } + + public Task> MonadicChangeFeedAsync(FeedRangeState feedRangeState, ChangeFeedPaginationOptions changeFeedPaginationOptions, ITrace trace, CancellationToken cancellationToken) + { + throw new NotImplementedException(); + } + + public Task> MonadicCreateItemAsync(CosmosObject payload, CancellationToken cancellationToken) + { + throw new NotImplementedException(); + } + + public Task>> MonadicGetChildRangeAsync(FeedRangeInternal feedRange, ITrace trace, CancellationToken cancellationToken) + { + throw new NotImplementedException(); + } + + public Task>> MonadicGetFeedRangesAsync(ITrace trace, CancellationToken cancellationToken) + { + return Task.FromResult(TryCatch>.FromResult(this.pages.Keys.Cast().ToList())); + } + + public Task> MonadicGetResourceIdentifierAsync(ITrace trace, CancellationToken cancellationToken) + { + throw new NotImplementedException(); + } + + public Task MonadicMergeAsync(FeedRangeInternal feedRange1, FeedRangeInternal feedRange2, CancellationToken cancellationToken) + { + throw new NotImplementedException(); + } + + public Task> MonadicQueryAsync(SqlQuerySpec sqlQuerySpec, FeedRangeState feedRangeState, QueryPaginationOptions queryPaginationOptions, ITrace trace, CancellationToken cancellationToken) + { + IReadOnlyList> feedRangePages = this.pages[feedRangeState.FeedRange]; + int index = feedRangeState.State == null ? 0 : int.Parse(((CosmosString)feedRangeState.State.Value).Value); + IReadOnlyList documents = feedRangePages[index]; + + QueryState state = index < feedRangePages.Count - 1 ? 
new QueryState(CosmosString.Create((index + 1).ToString())) : null; + QueryPage queryPage = new QueryPage( + documents: documents, + requestCharge: QueryCharge, + activityId: ActivityId, + cosmosQueryExecutionInfo: null, + distributionPlanSpec: null, + disallowContinuationTokenMessage: null, + additionalHeaders: null, + state: state, + streaming: this.streaming); + + return Task.FromResult(TryCatch.FromResult(queryPage)); + } + + public Task> MonadicReadFeedAsync(FeedRangeState feedRangeState, ReadFeedPaginationOptions readFeedPaginationOptions, ITrace trace, CancellationToken cancellationToken) + { + throw new NotImplementedException(); + } + + public Task> MonadicReadItemAsync(CosmosElement partitionKey, string identifer, CancellationToken cancellationToken) + { + throw new NotImplementedException(); + } + + public Task MonadicRefreshProviderAsync(ITrace trace, CancellationToken cancellationToken) + { + throw new NotImplementedException(); + } + + public Task MonadicSplitAsync(FeedRangeInternal feedRange, CancellationToken cancellationToken) + { + throw new NotImplementedException(); + } + + public async Task QueryAsync(SqlQuerySpec sqlQuerySpec, FeedRangeState feedRangeState, QueryPaginationOptions queryPaginationOptions, ITrace trace, CancellationToken cancellationToken) + { + TryCatch queryPage = await this.MonadicQueryAsync(sqlQuerySpec, feedRangeState, queryPaginationOptions, trace, cancellationToken); + return queryPage.Result; + } + + public Task ReadFeedAsync(FeedRangeState feedRangeState, ReadFeedPaginationOptions readFeedPaginationOptions, ITrace trace, CancellationToken cancellationToken) + { + throw new NotImplementedException(); + } + + public Task ReadItemAsync(CosmosElement partitionKey, string identifier, CancellationToken cancellationToken) + { + throw new NotImplementedException(); + } + + public Task RefreshProviderAsync(ITrace trace, CancellationToken cancellationToken) + { + throw new NotImplementedException(); + } + + public Task SplitAsync(FeedRangeInternal feedRange, CancellationToken cancellationToken) + { + throw new NotImplementedException(); + } + } + + [Flags] + enum PartitionedFeedMode + { + Streaming = 0, + NonStreaming = 1, + Reversed = 2, + + StreamingReversed = Streaming | Reversed, + NonStreamingReversed = NonStreaming | Reversed, + } + + private static IReadOnlyDictionary>> CreatePartitionedFeed( + IReadOnlyList feedRanges, + int leafPageCount, + int pageSize, + PartitionedFeedMode mode, + Func createDocument) + { + int feedRangeIndex = 0; + Dictionary>> pages = new Dictionary>>(); + foreach (FeedRangeEpk feedRange in feedRanges) + { + int index = feedRangeIndex; + List> leafPages = new List>(leafPageCount); + for (int pageIndex = 0; pageIndex < leafPageCount; ++pageIndex) + { + List documents = new List(pageSize); + for (int documentCount = 0; documentCount < pageSize; ++documentCount) + { + documents.Add(createDocument(index)); + index += feedRanges.Count; + } + + if (mode.HasFlag(PartitionedFeedMode.Reversed)) + { + documents.Reverse(); + } + + leafPages.Add(documents); + } + + if (mode.HasFlag(PartitionedFeedMode.NonStreaming)) + { + FischerYatesShuffle(leafPages); + } + + if (mode == PartitionedFeedMode.StreamingReversed) + { + leafPages.Reverse(); + } + + pages.Add(feedRange, leafPages); + ++feedRangeIndex; + } + + return pages; + } + + [Flags] + enum DocumentCreationMode + { + SingleItem = 0, + MultiItem = 1, + Swapped = 2, + + MultiItemSwapped = MultiItem | Swapped, + } + + private static CosmosElement CreateDocument(int index, 
DocumentCreationMode mode) + { + CosmosElement indexElement = CosmosNumber64.Create(index); + CosmosElement indexStringElement = CosmosString.Create(index.ToString("D4")); + + List orderByItems = new List + { + CosmosObject.Create(new Dictionary + { + [Item] = indexElement + }) + }; + + if (mode.HasFlag(DocumentCreationMode.MultiItem)) + { + orderByItems.Add(CosmosObject.Create(new Dictionary + { + [Item] = indexStringElement + })); + } + + if (mode.HasFlag(DocumentCreationMode.Swapped)) + { + orderByItems.Reverse(); + } + + Dictionary payload = new Dictionary + { + [Index] = indexElement + }; + + if (mode.HasFlag(DocumentCreationMode.MultiItem)) + { + payload.Add(IndexString, indexStringElement); + } + + Documents.ResourceId resourceId = Documents.ResourceId.NewCollectionChildResourceId( + CollectionRid, + (ulong)index, + Documents.ResourceType.Document); + + CosmosElement document = CosmosObject.Create(new Dictionary + { + [RId] = CosmosString.Create(resourceId.ToString()), + [OrderByItems] = CosmosArray.Create(orderByItems), + [Payload] = CosmosObject.Create(payload) + }); + + return document; + } + + private static void FischerYatesShuffle(IList list) + { + Random random = new Random(); + for (int index = list.Count - 1; index > 0; --index) + { + int other = random.Next(index + 1); + T temp = list[index]; + list[index] = list[other]; + list[other] = temp; + } + } + + private static async Task CreateDocumentContainerAsync(int documentCount) + { + Documents.PartitionKeyDefinition partitionKeyDefinition = new Documents.PartitionKeyDefinition() + { + Paths = new System.Collections.ObjectModel.Collection() + { + "/id" + }, + Kind = Documents.PartitionKind.Hash, + Version = Documents.PartitionKeyDefinitionVersion.V2, + }; + + IMonadicDocumentContainer monadicDocumentContainer = new InMemoryContainer(partitionKeyDefinition); + DocumentContainer documentContainer = new DocumentContainer(monadicDocumentContainer); + + for (int i = 0; i < 3; i++) + { + await documentContainer.RefreshProviderAsync(NoOpTrace.Singleton, cancellationToken: default); + IReadOnlyList ranges = await documentContainer.GetFeedRangesAsync( + trace: NoOpTrace.Singleton, + cancellationToken: default); + foreach (FeedRangeInternal range in ranges) + { + await documentContainer.SplitAsync(range, cancellationToken: default); + } + } + + for (int i = 0; i < documentCount; i++) + { + // Insert an item + CosmosObject item = CosmosObject.Parse($"{{\"id\": {i}, \"repeated\": {i % 5} }}"); + TryCatch monadicCreateRecord = await documentContainer.MonadicCreateItemAsync(item, cancellationToken: default); + Assert.IsTrue(monadicCreateRecord.Succeeded); + } + + return documentContainer; + } + } +} \ No newline at end of file diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/OrderByCrossPartitionQueryPipelineStageTests.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/OrderByCrossPartitionQueryPipelineStageTests.cs index af4ccc4eae..eebd3b1b83 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/OrderByCrossPartitionQueryPipelineStageTests.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/OrderByCrossPartitionQueryPipelineStageTests.cs @@ -5,7 +5,7 @@ namespace Microsoft.Azure.Cosmos.Tests.Query.Pipeline { using System; - using System.Collections.Generic; + using System.Collections.Generic; using System.Linq; using System.Threading.Tasks; using Microsoft.Azure.Cosmos.CosmosElements; @@ -78,7 +78,6 @@ public 
void MonadicCreate_NullContinuationToken() }, queryPaginationOptions: new QueryPaginationOptions(pageSizeHint: 10), maxConcurrency: 10, - cancellationToken: default, continuationToken: null); Assert.IsTrue(monadicCreate.Succeeded); } @@ -99,7 +98,6 @@ public void MonadicCreate_NonCosmosArrayContinuationToken() }, queryPaginationOptions: new QueryPaginationOptions(pageSizeHint: 10), maxConcurrency: 10, - cancellationToken: default, continuationToken: CosmosObject.Create(new Dictionary())); Assert.IsTrue(monadicCreate.Failed); Assert.IsTrue(monadicCreate.InnerMostException is MalformedContinuationTokenException); @@ -121,7 +119,6 @@ public void MonadicCreate_EmptyArrayContinuationToken() }, queryPaginationOptions: new QueryPaginationOptions(pageSizeHint: 10), maxConcurrency: 10, - cancellationToken: default, continuationToken: CosmosArray.Create(new List())); Assert.IsTrue(monadicCreate.Failed); Assert.IsTrue(monadicCreate.InnerMostException is MalformedContinuationTokenException); @@ -143,7 +140,6 @@ public void MonadicCreate_NonParallelContinuationToken() }, queryPaginationOptions: new QueryPaginationOptions(pageSizeHint: 10), maxConcurrency: 10, - cancellationToken: default, continuationToken: CosmosArray.Create(new List() { CosmosString.Create("asdf") })); Assert.IsTrue(monadicCreate.Failed); Assert.IsTrue(monadicCreate.InnerMostException is MalformedContinuationTokenException); @@ -180,7 +176,6 @@ public void MonadicCreate_SingleOrderByContinuationToken() }, queryPaginationOptions: new QueryPaginationOptions(pageSizeHint: 10), maxConcurrency: 10, - cancellationToken: default, continuationToken: CosmosArray.Create( new List() { @@ -225,7 +220,6 @@ public void MonadicCreate_SingleOrderByContinuationToken() }, queryPaginationOptions: new QueryPaginationOptions(pageSizeHint: 10), maxConcurrency: 10, - cancellationToken: default, continuationToken: CosmosArray.Create( new List() { @@ -285,7 +279,6 @@ public void MonadicCreate_MultipleOrderByContinuationToken() }, queryPaginationOptions: new QueryPaginationOptions(pageSizeHint: 10), maxConcurrency: 10, - cancellationToken: default, continuationToken: CosmosArray.Create( new List() { @@ -328,7 +321,6 @@ public void MonadicCreate_OrderByWithResumeValues() }, queryPaginationOptions: new QueryPaginationOptions(pageSizeHint: 10), maxConcurrency: 10, - cancellationToken: default, continuationToken: CosmosArray.Create( new List() { @@ -370,7 +362,6 @@ public void MonadicCreate_OrderByWithResumeValues() }, queryPaginationOptions: new QueryPaginationOptions(pageSizeHint: 10), maxConcurrency: 10, - cancellationToken: default, continuationToken: CosmosArray.Create( new List() { @@ -388,12 +379,12 @@ public async Task TestFormattedFiltersForTargetPartitionWithContinuationTokenAsy documents: new List(), requestCharge: 0, activityId: string.Empty, - responseLengthInBytes: 0, cosmosQueryExecutionInfo: default, distributionPlanSpec: default, disallowContinuationTokenMessage: default, additionalHeaders: default, - state: default); + state: default, + streaming: default); string expectedQuerySpec = "SELECT * FROM c WHERE true ORDER BY c._ts"; Mock mockContainer = new Mock(MockBehavior.Strict); @@ -426,17 +417,16 @@ public async Task TestFormattedFiltersForTargetPartitionWithContinuationTokenAsy }, queryPaginationOptions: new QueryPaginationOptions(pageSizeHint: 1), maxConcurrency: 0, - cancellationToken: default, continuationToken: CosmosElement.Parse(continuationToken)); Assert.IsTrue(monadicCreate.Succeeded); IQueryPipelineStage queryPipelineStage = 
monadicCreate.Result; for (int i = 0; i < targetRanges.Count; ++i) { - Assert.IsTrue(await queryPipelineStage.MoveNextAsync(NoOpTrace.Singleton)); + Assert.IsTrue(await queryPipelineStage.MoveNextAsync(NoOpTrace.Singleton, cancellationToken: default)); } - Assert.IsFalse(await queryPipelineStage.MoveNextAsync(NoOpTrace.Singleton)); + Assert.IsFalse(await queryPipelineStage.MoveNextAsync(NoOpTrace.Singleton, cancellationToken: default)); } [TestMethod] @@ -462,13 +452,12 @@ FROM c }, queryPaginationOptions: new QueryPaginationOptions(pageSizeHint: 10), maxConcurrency: 10, - cancellationToken: default, continuationToken: null); Assert.IsTrue(monadicCreate.Succeeded); IQueryPipelineStage queryPipelineStage = monadicCreate.Result; List documents = new List(); - while (await queryPipelineStage.MoveNextAsync(NoOpTrace.Singleton)) + while (await queryPipelineStage.MoveNextAsync(NoOpTrace.Singleton, cancellationToken: default)) { TryCatch tryGetQueryPage = queryPipelineStage.Current; if (tryGetQueryPage.Failed) @@ -479,7 +468,11 @@ FROM c QueryPage queryPage = tryGetQueryPage.Result; documents.AddRange(queryPage.Documents); - Assert.AreEqual(42, queryPage.RequestCharge); + if (queryPage.RequestCharge > 0) + { + // some empty pages may be emitted + Assert.AreEqual(42, queryPage.RequestCharge); + } } Assert.AreEqual(numItems, documents.Count); @@ -508,12 +501,12 @@ FROM c }, queryPaginationOptions: new QueryPaginationOptions(pageSizeHint: 10), maxConcurrency: 10, - cancellationToken: default, continuationToken: null); Assert.IsTrue(monadicCreate.Succeeded); IQueryPipelineStage queryPipelineStage = monadicCreate.Result; - - while (await queryPipelineStage.MoveNextAsync(NoOpTrace.Singleton)) + + int countAdditionalHeadersReceived = 0; + while (await queryPipelineStage.MoveNextAsync(NoOpTrace.Singleton, cancellationToken: default)) { TryCatch tryGetQueryPage = queryPipelineStage.Current; if (tryGetQueryPage.Failed) @@ -521,11 +514,20 @@ FROM c Assert.Fail(tryGetQueryPage.Exception.ToString()); } - QueryPage queryPage = tryGetQueryPage.Result; - Assert.IsTrue(queryPage.AdditionalHeaders.Count > 0); - } - } - + QueryPage queryPage = tryGetQueryPage.Result; + if (queryPage.AdditionalHeaders.Count > 0) + { + ++countAdditionalHeadersReceived; + } + } + + int countFeedRanges = (await documentContainer.GetFeedRangesAsync( + trace: NoOpTrace.Singleton, + cancellationToken: default)) + .Count; + Assert.IsTrue(countAdditionalHeadersReceived >= countFeedRanges); + } + [TestMethod] [DataRow(false, false, false, DisplayName = "Use State: false, Allow Splits: false, Allow Merges: false")] [DataRow(false, false, true, DisplayName = "Use State: false, Allow Splits: false, Allow Merges: true")] @@ -556,7 +558,6 @@ FROM c }, queryPaginationOptions: new QueryPaginationOptions(pageSizeHint: 10), maxConcurrency: 10, - cancellationToken: default, continuationToken: continuationToken); monadicQueryPipelineStage.ThrowIfFailed(); IQueryPipelineStage queryPipelineStage = monadicQueryPipelineStage.Result; @@ -569,7 +570,7 @@ FROM c IQueryPipelineStage queryPipelineStage = await CreatePipelineStateAsync(inMemoryCollection, continuationToken: null); List documents = new List(); Random random = new Random(); - while (await queryPipelineStage.MoveNextAsync(NoOpTrace.Singleton)) + while (await queryPipelineStage.MoveNextAsync(NoOpTrace.Singleton, cancellationToken: default)) { TryCatch tryGetPage = queryPipelineStage.Current; tryGetPage.ThrowIfFailed(); @@ -584,7 +585,7 @@ FROM c { // We need to drain out all the initial empty pages, 
// since they are non resumable state. - Assert.IsTrue(await queryPipelineStage.MoveNextAsync(NoOpTrace.Singleton)); + Assert.IsTrue(await queryPipelineStage.MoveNextAsync(NoOpTrace.Singleton, cancellationToken: default)); TryCatch tryGetQueryPage = queryPipelineStage.Current; if (tryGetQueryPage.Failed) { diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/ParallelCrossPartitionQueryPipelineStageTests.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/ParallelCrossPartitionQueryPipelineStageTests.cs index e772996748..e4e06635b3 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/ParallelCrossPartitionQueryPipelineStageTests.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/ParallelCrossPartitionQueryPipelineStageTests.cs @@ -38,7 +38,6 @@ public void MonadicCreate_NullContinuationToken() partitionKey: null, maxConcurrency: 10, prefetchPolicy: PrefetchPolicy.PrefetchSinglePage, - cancellationToken: default, continuationToken: null); Assert.IsTrue(monadicCreate.Succeeded); } @@ -56,7 +55,6 @@ public void MonadicCreate_NonCosmosArrayContinuationToken() partitionKey: null, maxConcurrency: 10, prefetchPolicy: PrefetchPolicy.PrefetchSinglePage, - cancellationToken: default, continuationToken: CosmosObject.Create(new Dictionary())); Assert.IsTrue(monadicCreate.Failed); Assert.IsTrue(monadicCreate.InnerMostException is MalformedContinuationTokenException); @@ -75,7 +73,6 @@ public void MonadicCreate_EmptyArrayContinuationToken() partitionKey: null, maxConcurrency: 10, prefetchPolicy: PrefetchPolicy.PrefetchSinglePage, - cancellationToken: default, continuationToken: CosmosArray.Create(new List())); Assert.IsTrue(monadicCreate.Failed); Assert.IsTrue(monadicCreate.InnerMostException is MalformedContinuationTokenException); @@ -94,7 +91,6 @@ public void MonadicCreate_NonParallelContinuationToken() partitionKey: null, maxConcurrency: 10, prefetchPolicy: PrefetchPolicy.PrefetchSinglePage, - cancellationToken: default, continuationToken: CosmosArray.Create(new List() { CosmosString.Create("asdf") })); Assert.IsTrue(monadicCreate.Failed); Assert.IsTrue(monadicCreate.InnerMostException is MalformedContinuationTokenException); @@ -117,7 +113,6 @@ public void MonadicCreate_SingleParallelContinuationToken() partitionKey: null, maxConcurrency: 10, prefetchPolicy: PrefetchPolicy.PrefetchSinglePage, - cancellationToken: default, continuationToken: CosmosArray.Create(new List() { ParallelContinuationToken.ToCosmosElement(token) })); Assert.IsTrue(monadicCreate.Succeeded); } @@ -147,7 +142,6 @@ public void MonadicCreate_MultipleParallelContinuationToken() partitionKey: null, maxConcurrency: 10, prefetchPolicy: PrefetchPolicy.PrefetchSinglePage, - cancellationToken: default, continuationToken: CosmosArray.Create( new List() { @@ -188,7 +182,6 @@ async Task CreatePipelineStateAsync(IDocumentContainer docu partitionKey: null, maxConcurrency: 10, prefetchPolicy: aggressivePrefetch ? 
PrefetchPolicy.PrefetchAll : PrefetchPolicy.PrefetchSinglePage, - cancellationToken: default, continuationToken: continuationToken); Assert.IsTrue(monadicQueryPipelineStage.Succeeded); IQueryPipelineStage queryPipelineStage = monadicQueryPipelineStage.Result; @@ -201,7 +194,7 @@ async Task CreatePipelineStateAsync(IDocumentContainer docu IQueryPipelineStage queryPipelineStage = await CreatePipelineStateAsync(inMemoryCollection, continuationToken: null); List documents = new List(); Random random = new Random(); - while (await queryPipelineStage.MoveNextAsync(NoOpTrace.Singleton)) + while (await queryPipelineStage.MoveNextAsync(NoOpTrace.Singleton, cancellationToken: default)) { TryCatch tryGetPage = queryPipelineStage.Current; tryGetPage.ThrowIfFailed(); diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/PartitionRangePageAsyncEnumerable.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/PartitionRangePageAsyncEnumerable.cs index eb8981a65e..12a6ca9a1e 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/PartitionRangePageAsyncEnumerable.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/PartitionRangePageAsyncEnumerable.cs @@ -34,7 +34,7 @@ public IAsyncEnumerator> GetAsyncEnumerator(CancellationToken ca { cancellationToken.ThrowIfCancellationRequested(); - return new TracingAsyncEnumerator>(this.createPartitionRangeEnumerator(this.feedRangeState), this.trace); + return new TracingAsyncEnumerator>(this.createPartitionRangeEnumerator(this.feedRangeState), this.trace, cancellationToken); } } } diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/SkipEmptyPageQueryPipelineStageTests.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/SkipEmptyPageQueryPipelineStageTests.cs index 1b88966a8c..c498ade871 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/SkipEmptyPageQueryPipelineStageTests.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/SkipEmptyPageQueryPipelineStageTests.cs @@ -26,7 +26,7 @@ public async Task StackOverflowTest() .Repeat(EmptyPagePipelineStage.PageType.Empty, 2000) .Concat(Enumerable.Repeat(EmptyPagePipelineStage.PageType.Error, 1)) .ToList()); - bool hasNext = await pipeline.MoveNextAsync(NoOpTrace.Singleton); + bool hasNext = await pipeline.MoveNextAsync(NoOpTrace.Singleton, cancellationToken: default); Assert.IsTrue(hasNext); TryCatch result = pipeline.Current; Assert.IsFalse(result.Succeeded); @@ -75,7 +75,7 @@ public async Task BasicTests() await using IQueryPipelineStage pipeline = CreatePipeline(testCase.Input); for (int index = 0; index < testCase.Expected.Count; ++index) { - Assert.IsTrue(await pipeline.MoveNextAsync(NoOpTrace.Singleton)); + Assert.IsTrue(await pipeline.MoveNextAsync(NoOpTrace.Singleton, cancellationToken: default)); if (testCase.Expected[index]) { @@ -112,11 +112,8 @@ public TestCase(IReadOnlyList input, IReadOnlyL private static IQueryPipelineStage CreatePipeline(IReadOnlyList pages) { EmptyPagePipelineStage emptyPagePipelineStage = new EmptyPagePipelineStage(pages); - SkipEmptyPageQueryPipelineStage skipEmptyPageStage = new SkipEmptyPageQueryPipelineStage( - inputStage: emptyPagePipelineStage, - cancellationToken: default); - - return new CatchAllQueryPipelineStage(inputStage: skipEmptyPageStage, cancellationToken: default); + SkipEmptyPageQueryPipelineStage skipEmptyPageStage = new 
SkipEmptyPageQueryPipelineStage(emptyPagePipelineStage); + return new CatchAllQueryPipelineStage(skipEmptyPageStage); } internal class EmptyPagePipelineStage : IQueryPipelineStage @@ -127,23 +124,23 @@ public enum PageType { Empty, NonEmpty, Error }; documents: new List(), requestCharge: 42, activityId: Guid.NewGuid().ToString(), - responseLengthInBytes: "[]".Length, cosmosQueryExecutionInfo: default, distributionPlanSpec: default, disallowContinuationTokenMessage: default, additionalHeaders: default, - state: new QueryState(CosmosString.Create("Empty")))); + state: new QueryState(CosmosString.Create("Empty")), + streaming: default)); private static readonly TryCatch NonEmpty = TryCatch.FromResult(new QueryPage( documents: new List { CosmosElement.Parse("42") }, requestCharge: 100, activityId: Guid.NewGuid().ToString(), - responseLengthInBytes: "[42]".Length, cosmosQueryExecutionInfo: default, distributionPlanSpec: default, disallowContinuationTokenMessage: default, additionalHeaders: default, - state: new QueryState(CosmosString.Create("NonEmpty")))); + state: new QueryState(CosmosString.Create("NonEmpty")), + streaming: default)); private readonly IReadOnlyList pages; @@ -162,7 +159,7 @@ public ValueTask DisposeAsync() return new ValueTask(); } - public ValueTask MoveNextAsync(ITrace trace) + public ValueTask MoveNextAsync(ITrace trace, CancellationToken cancellationToken) { ++this.current; if (this.current >= this.pages.Count) @@ -189,10 +186,6 @@ public ValueTask MoveNextAsync(ITrace trace) return new ValueTask(true); } - - public void SetCancellationToken(CancellationToken cancellationToken) - { - } } } } \ No newline at end of file diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/SkipQueryPipelineStageTests.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/SkipQueryPipelineStageTests.cs index d2749f9f9d..c134d4b72d 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/SkipQueryPipelineStageTests.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/SkipQueryPipelineStageTests.cs @@ -52,8 +52,7 @@ private static async Task> CreateAndDrainAsync( executionEnvironment: executionEnvironment, offsetCount: offsetCount, continuationToken: continuationToken, - cancellationToken: default, - monadicCreatePipelineStage: (CosmosElement continuationToken, CancellationToken token) => TryCatch.FromResult(source)); + monadicCreatePipelineStage: (CosmosElement continuationToken) => TryCatch.FromResult(source)); Assert.IsTrue(tryCreateSkipQueryPipelineStage.Succeeded); IQueryPipelineStage aggregateQueryPipelineStage = tryCreateSkipQueryPipelineStage.Result; diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/TakeQueryPipelineStageTests.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/TakeQueryPipelineStageTests.cs index 36b2c189c9..c86e2c7905 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/TakeQueryPipelineStageTests.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/Pipeline/TakeQueryPipelineStageTests.cs @@ -92,8 +92,7 @@ public async Task BasicTests() executionEnvironment: executionEnvironment, limitCount: takeCount, requestContinuationToken: continuationToken, - cancellationToken: default, - monadicCreatePipelineStage: (CosmosElement continuationToken, CancellationToken cancellationToken) => TryCatch.FromResult(source)); + monadicCreatePipelineStage: 
(CosmosElement continuationToken) => TryCatch.FromResult(source)); Assert.IsTrue(tryCreateSkipQueryPipelineStage.Succeeded); IQueryPipelineStage takeQueryPipelineStage = tryCreateSkipQueryPipelineStage.Result; diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/QueryPartitionRangePageEnumeratorTests.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/QueryPartitionRangePageEnumeratorTests.cs index 4672fb17f5..2bb6813ff6 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/QueryPartitionRangePageEnumeratorTests.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Query/QueryPartitionRangePageEnumeratorTests.cs @@ -100,8 +100,7 @@ public async Task TestSplitAsync() sqlQuerySpec: new Cosmos.Query.Core.SqlQuerySpec("SELECT * FROM c"), feedRangeState: feedRangeState, partitionKey: null, - queryPaginationOptions: new QueryPaginationOptions(pageSizeHint: 10), - cancellationToken: default), + queryPaginationOptions: new QueryPaginationOptions(pageSizeHint: 10)), trace: NoOpTrace.Singleton); HashSet resourceIdentifiers = await this.DrainFullyAsync(enumerable); @@ -143,8 +142,7 @@ protected override IAsyncEnumerable> CreateEnumerable( sqlQuerySpec: new Cosmos.Query.Core.SqlQuerySpec("SELECT * FROM c"), feedRangeState: feedRangeState, partitionKey: null, - queryPaginationOptions: new QueryPaginationOptions(pageSizeHint: 10), - cancellationToken: default), + queryPaginationOptions: new QueryPaginationOptions(pageSizeHint: 10)), trace: NoOpTrace.Singleton); } @@ -166,9 +164,9 @@ protected override Task>> CreateEnumeratorA sqlQuerySpec: new Cosmos.Query.Core.SqlQuerySpec("SELECT * FROM c"), feedRangeState: new FeedRangeState(ranges[0], state), partitionKey: null, - queryPaginationOptions: new QueryPaginationOptions(pageSizeHint: 10), - cancellationToken: cancellationToken), - trace: NoOpTrace.Singleton); + queryPaginationOptions: new QueryPaginationOptions(pageSizeHint: 10)), + trace: NoOpTrace.Singleton, + cancellationToken: cancellationToken); return Task.FromResult(enumerator); } diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Scenarios/GremlinScenarioTests.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Scenarios/GremlinScenarioTests.cs index 093944190e..1a894d7f1b 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Scenarios/GremlinScenarioTests.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Scenarios/GremlinScenarioTests.cs @@ -644,7 +644,6 @@ internal void GetCosmosElementsFromQueryResponseTest(JsonSerializationFormat jso QueryResponse queryResponse = QueryResponse.CreateSuccess( vertexArray, count: 2, - responseLengthBytes: vertex1JsonWriterResult.Length + vertex2JsonWriterResult.Length, serializationOptions: null, trace: NoOpTrace.Singleton, responseHeaders: CosmosQueryResponseMessageHeaders.ConvertToQueryHeaders( @@ -726,7 +725,6 @@ internal void GetDeserializedObjectsFromQueryResponseTest(JsonSerializationForma QueryResponse queryResponse = QueryResponse.CreateSuccess( vertexArray, count: 2, - responseLengthBytes: vertex1JsonWriterResult.Length + vertex2JsonWriterResult.Length, serializationOptions: null, trace: NoOpTrace.Singleton, responseHeaders: CosmosQueryResponseMessageHeaders.ConvertToQueryHeaders( diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/TestHandler.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/TestHandler.cs index 2d947912e2..199fb3c88f 100644 --- 
a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/TestHandler.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/TestHandler.cs @@ -38,8 +38,11 @@ public static Task ReturnSuccess() { ResponseMessage responseMessage = new ResponseMessage(HttpStatusCode.OK) { - Content = new MemoryStream(Encoding.UTF8.GetBytes(@"{ ""Documents"": [{ ""id"": ""Test""}]}")) - }; + Content = new MemoryStream(Encoding.UTF8.GetBytes(@"{ ""_count"": 1, ""Documents"": [{ ""id"": ""Test""}]}")) + }; + + responseMessage.Headers.Add(HttpConstants.HttpHeaders.ItemCount, "1"); + return responseMessage; }); } diff --git a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Tracing/TraceWriterBaselineTests.cs b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Tracing/TraceWriterBaselineTests.cs index 2d69341182..aa2e145850 100644 --- a/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Tracing/TraceWriterBaselineTests.cs +++ b/Microsoft.Azure.Cosmos/tests/Microsoft.Azure.Cosmos.Tests/Tracing/TraceWriterBaselineTests.cs @@ -526,14 +526,13 @@ public async Task ScenariosAsync() CrossPartitionReadFeedAsyncEnumerator enumerator = CrossPartitionReadFeedAsyncEnumerator.Create( documentContainer, new CrossFeedRangeState(ReadFeedCrossFeedRangeState.CreateFromBeginning().FeedRangeStates), - new ReadFeedPaginationOptions(pageSizeHint: 10), - cancellationToken: default); + new ReadFeedPaginationOptions(pageSizeHint: 10)); int numChildren = 1; // One extra since we need to read one past the last user page to get the null continuation. TraceForBaselineTesting rootTrace; using (rootTrace = TraceForBaselineTesting.GetRootTrace()) { - while (await enumerator.MoveNextAsync(rootTrace)) + while (await enumerator.MoveNextAsync(rootTrace, cancellationToken: default)) { numChildren++; } @@ -559,14 +558,13 @@ public async Task ScenariosAsync() ChangeFeedCrossFeedRangeState.CreateFromBeginning().FeedRangeStates), new ChangeFeedPaginationOptions( ChangeFeedMode.Incremental, - pageSizeHint: int.MaxValue), - cancellationToken: default); + pageSizeHint: int.MaxValue)); int numChildren = 0; TraceForBaselineTesting rootTrace; using (rootTrace = TraceForBaselineTesting.GetRootTrace()) { - while (await enumerator.MoveNextAsync(rootTrace)) + while (await enumerator.MoveNextAsync(rootTrace, cancellationToken: default)) { numChildren++; @@ -597,7 +595,7 @@ public async Task ScenariosAsync() int numChildren = (await documentContainer.GetFeedRangesAsync(NoOpTrace.Singleton, default)).Count; // One extra since we need to read one past the last user page to get the null continuation. using (rootTrace = TraceForBaselineTesting.GetRootTrace()) { - while (await pipelineStage.MoveNextAsync(rootTrace)) + while (await pipelineStage.MoveNextAsync(rootTrace, cancellationToken: default)) { numChildren++; } @@ -759,7 +757,6 @@ private static IQueryPipelineStage CreatePipeline(IDocumentContainer documentCon GetQueryPlan(query), new QueryPaginationOptions(pageSizeHint: pageSize), maxConcurrency: 10, - requestCancellationToken: default, requestContinuationToken: state); tryCreatePipeline.ThrowIfFailed();
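Across these changes the cancellation token is no longer captured when a pipeline stage or enumerator is constructed; it travels with each MoveNextAsync call alongside the trace. A minimal sketch of draining a stage under the updated contract, mirroring the loops in the tests above; the DrainAsync helper name is illustrative and not part of this change:

// Hypothetical helper: drains a query pipeline stage, passing the trace and the
// cancellation token on every MoveNextAsync call (per the updated signature).
private static async Task<List<CosmosElement>> DrainAsync(
    IQueryPipelineStage pipelineStage,
    ITrace trace,
    CancellationToken cancellationToken)
{
    List<CosmosElement> documents = new List<CosmosElement>();
    while (await pipelineStage.MoveNextAsync(trace, cancellationToken))
    {
        TryCatch<QueryPage> tryGetPage = pipelineStage.Current;
        tryGetPage.ThrowIfFailed();
        documents.AddRange(tryGetPage.Result.Documents);
    }

    return documents;
}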