diff --git a/sdk/cosmosdb/cosmos/.vscode/launch.json b/sdk/cosmosdb/cosmos/.vscode/launch.json deleted file mode 100644 index 79bae02a276a..000000000000 --- a/sdk/cosmosdb/cosmos/.vscode/launch.json +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. -{ - "version": "0.2.0", - "configurations": [ - { - "name": "Test: Current Open File", - "program": "${workspaceFolder}/node_modules/mocha/bin/_mocha", - "type": "node", - "args": [ - "--timeout", - "100000", - "-r", - "test/mocha.env.ts", - "-r", - "ts-node/register", - "-r", - "esm", - "-r", - "dotenv/config", - "-r", - "./test/public/common/setup.ts", - "--colors", - "**/${fileBasenameNoExtension}.ts" - ], - "internalConsoleOptions": "openOnSessionStart", - "request": "launch", - "skipFiles": [ - "/**" - ], - }, - { - "name": "Test: Selected Test Case", - "program": "${workspaceFolder}/node_modules/mocha/bin/_mocha", - "type": "node", - "args": [ - "--timeout", - "100000", - "-r", - "test/mocha.env.ts", - "-r", - "ts-node/register", - "-r", - "esm", - "-r", - "dotenv/config", - "-r", - "./test/public/common/setup.ts", - "--colors", - "**/${fileBasenameNoExtension}.ts", - "-g", - "${selectedText}" - ], - "internalConsoleOptions": "openOnSessionStart", - "request": "launch", - "skipFiles": [ - "/**" - ], - } - ] -} \ No newline at end of file diff --git a/sdk/cosmosdb/cosmos/review/cosmos.api.md b/sdk/cosmosdb/cosmos/review/cosmos.api.md index ffad890a801f..fe18c0a83e7d 100644 --- a/sdk/cosmosdb/cosmos/review/cosmos.api.md +++ b/sdk/cosmosdb/cosmos/review/cosmos.api.md @@ -90,7 +90,7 @@ export class ClientContext { batch({ body, path, partitionKey, resourceId, options, }: { body: T; path: string; - partitionKey: string; + partitionKey: PartitionKey; resourceId: string; options?: RequestOptions; }): Promise>; @@ -584,7 +584,7 @@ export interface CreateOperationInput { // (undocumented) operationType: typeof BulkOperationType.Create; // (undocumented) - partitionKey?: string | number | null | Record | undefined; + partitionKey?: PartitionKey; // (undocumented) resourceBody: JSONObject; } @@ -694,7 +694,7 @@ export interface DeleteOperationInput { // (undocumented) operationType: typeof BulkOperationType.Delete; // (undocumented) - partitionKey?: string | number | null | Record | undefined; + partitionKey?: PartitionKey; } // @public (undocumented) @@ -734,8 +734,10 @@ export type ExistingKeyOperation = { path: string; }; -// @public (undocumented) -export function extractPartitionKey(document: unknown, partitionKeyDefinition: PartitionKeyDefinition): PartitionKey[]; +// Warning: (ae-forgotten-export) The symbol "PartitionKeyInternal" needs to be exported by the entry point index.d.ts +// +// @public +export function extractPartitionKey(document: unknown, partitionKeyDefinition?: PartitionKeyDefinition): PartitionKeyInternal | undefined; // @public export interface FeedOptions extends SharedOptions { @@ -752,7 +754,7 @@ export interface FeedOptions extends SharedOptions { forceQueryPlan?: boolean; maxDegreeOfParallelism?: number; maxItemCount?: number; - partitionKey?: any; + partitionKey?: PartitionKey; populateQueryMetrics?: boolean; useIncrementalFeed?: boolean; } @@ -870,7 +872,7 @@ export enum IndexKind { // @public export class Item { - constructor(container: Container, id: string, partitionKey: PartitionKey, clientContext: ClientContext); + constructor(container: Container, id: string, clientContext: ClientContext, partitionKey?: PartitionKey); // (undocumented) 
readonly container: Container; delete(options?: RequestOptions): Promise>; @@ -900,7 +902,7 @@ export class ItemResponse extends ResourceResponse>; + batch(operations: OperationInput[], partitionKey?: PartitionKey, options?: RequestOptions): Promise>; bulk(operations: OperationInput[], bulkOptions?: BulkOptions, options?: RequestOptions): Promise; changeFeed(partitionKey: string | number | boolean, changeFeedOptions?: ChangeFeedOptions): ChangeFeedIterator; changeFeed(changeFeedOptions?: ChangeFeedOptions): ChangeFeedIterator; @@ -1074,15 +1076,20 @@ export interface PartitionedQueryExecutionInfo { queryRanges: QueryRange[]; } +// Warning: (ae-forgotten-export) The symbol "PrimitivePartitionKeyValue" needs to be exported by the entry point index.d.ts +// // @public (undocumented) -export type PartitionKey = PartitionKeyDefinition | string | number | unknown; +export type PartitionKey = PrimitivePartitionKeyValue | PrimitivePartitionKeyValue[]; // @public (undocumented) export interface PartitionKeyDefinition { + // Warning: (ae-forgotten-export) The symbol "PartitionKeyKind" needs to be exported by the entry point index.d.ts + kind?: PartitionKeyKind; paths: string[]; // (undocumented) systemKey?: boolean; - version?: number; + // Warning: (ae-forgotten-export) The symbol "PartitionKeyDefinitionVersion" needs to be exported by the entry point index.d.ts + version?: PartitionKeyDefinitionVersion; } // @public (undocumented) @@ -1127,7 +1134,7 @@ export interface PatchOperationInput { // (undocumented) operationType: typeof BulkOperationType.Patch; // (undocumented) - partitionKey?: string | number | null | Record | undefined; + partitionKey?: PartitionKey; // (undocumented) resourceBody: PatchRequestBody; } @@ -1381,7 +1388,7 @@ export interface ReadOperationInput { // (undocumented) operationType: typeof BulkOperationType.Read; // (undocumented) - partitionKey?: string | number | boolean | null | Record | undefined; + partitionKey?: PartitionKey; } // @public (undocumented) @@ -1407,7 +1414,7 @@ export interface ReplaceOperationInput { // (undocumented) operationType: typeof BulkOperationType.Replace; // (undocumented) - partitionKey?: string | number | null | Record | undefined; + partitionKey?: PartitionKey; // (undocumented) resourceBody: JSONObject; } @@ -2014,7 +2021,7 @@ export interface UpsertOperationInput { // (undocumented) operationType: typeof BulkOperationType.Upsert; // (undocumented) - partitionKey?: string | number | null | Record | undefined; + partitionKey?: PartitionKey; // (undocumented) resourceBody: JSONObject; } diff --git a/sdk/cosmosdb/cosmos/src/ClientContext.ts b/sdk/cosmosdb/cosmos/src/ClientContext.ts index f72c13940d2b..9206c1cd3f39 100644 --- a/sdk/cosmosdb/cosmos/src/ClientContext.ts +++ b/sdk/cosmosdb/cosmos/src/ClientContext.ts @@ -13,7 +13,13 @@ import { Constants, HTTPMethod, OperationType, ResourceType } from "./common/con import { getIdFromLink, getPathFromLink, parseLink } from "./common/helper"; import { StatusCodes, SubStatusCodes } from "./common/statusCodes"; import { Agent, CosmosClientOptions } from "./CosmosClientOptions"; -import { ConnectionPolicy, ConsistencyLevel, DatabaseAccount, PartitionKey } from "./documents"; +import { + ConnectionPolicy, + ConsistencyLevel, + DatabaseAccount, + PartitionKey, + convertToInternalPartitionKey, +} from "./documents"; import { GlobalEndpointManager } from "./globalEndpointManager"; import { PluginConfig, PluginOn, executePlugins } from "./plugins/Plugin"; import { FetchFunctionCallback, SqlQuerySpec } 
from "./queryExecutionContext"; @@ -600,7 +606,7 @@ export class ClientContext { }: { body: T; path: string; - partitionKey: string; + partitionKey: PartitionKey; resourceId: string; options?: RequestOptions; }): Promise> { @@ -757,12 +763,16 @@ export class ClientContext { options: requestContext.options, partitionKeyRangeId: requestContext.partitionKeyRangeId, useMultipleWriteLocations: this.connectionPolicy.useMultipleWriteLocations, - partitionKey: requestContext.partitionKey, + partitionKey: + requestContext.partitionKey !== undefined + ? convertToInternalPartitionKey(requestContext.partitionKey) + : undefined, // TODO: Move this check from here to PartitionKey }); } /** - * Returns collection of properties which are derived from the context for Request Creation + * Returns collection of properties which are derived from the context for Request Creation. + * These properties have client wide scope, as opposed to request specific scope. * @returns */ private getContextDerivedPropsForRequestCreation(): { diff --git a/sdk/cosmosdb/cosmos/src/client/Container/Container.ts b/sdk/cosmosdb/cosmos/src/client/Container/Container.ts index ef671abd2031..df57a0c25af2 100644 --- a/sdk/cosmosdb/cosmos/src/client/Container/Container.ts +++ b/sdk/cosmosdb/cosmos/src/client/Container/Container.ts @@ -107,7 +107,7 @@ export class Container { * `const {body: replacedItem} = await container.item("", "").replace({id: "", title: "Updated post", authorID: 5});` */ public item(id: string, partitionKeyValue?: PartitionKey): Item { - return new Item(this, id, partitionKeyValue, this.clientContext); + return new Item(this, id, this.clientContext, partitionKeyValue); } /** diff --git a/sdk/cosmosdb/cosmos/src/client/Item/Item.ts b/sdk/cosmosdb/cosmos/src/client/Item/Item.ts index d402f3687cc1..10255933e6d9 100644 --- a/sdk/cosmosdb/cosmos/src/client/Item/Item.ts +++ b/sdk/cosmosdb/cosmos/src/client/Item/Item.ts @@ -9,7 +9,7 @@ import { ResourceType, StatusCodes, } from "../../common"; -import { PartitionKey } from "../../documents"; +import { PartitionKey, PartitionKeyInternal, convertToInternalPartitionKey } from "../../documents"; import { extractPartitionKey, undefinedPartitionKey } from "../../extractPartitionKey"; import { RequestOptions, Response } from "../../request"; import { PatchRequestBody } from "../../utils/patch"; @@ -24,7 +24,7 @@ import { ItemResponse } from "./ItemResponse"; * @see {@link Items} for operations on all items; see `container.items`. */ export class Item { - private partitionKey: PartitionKey; + private partitionKey: PartitionKeyInternal; /** * Returns a reference URL to the resource. Used for linking in Permissions. */ @@ -41,10 +41,11 @@ export class Item { constructor( public readonly container: Container, public readonly id: string, - partitionKey: PartitionKey, - private readonly clientContext: ClientContext + private readonly clientContext: ClientContext, + partitionKey?: PartitionKey ) { - this.partitionKey = partitionKey; + this.partitionKey = + partitionKey === undefined ? 
undefined : convertToInternalPartitionKey(partitionKey); } /** diff --git a/sdk/cosmosdb/cosmos/src/client/Item/Items.ts b/sdk/cosmosdb/cosmos/src/client/Item/Items.ts index 371e40f7a8e1..bed14a38188b 100644 --- a/sdk/cosmosdb/cosmos/src/client/Item/Items.ts +++ b/sdk/cosmosdb/cosmos/src/client/Item/Items.ts @@ -17,16 +17,15 @@ import { ItemResponse } from "./ItemResponse"; import { Batch, isKeyInRange, - Operation, - getPartitionKeyToHash, - decorateOperation, + prepareOperations, OperationResponse, OperationInput, BulkOptions, decorateBatchOperation, } from "../../utils/batch"; -import { hashV1PartitionKey } from "../../utils/hashing/v1"; -import { hashV2PartitionKey } from "../../utils/hashing/v2"; +import { assertNotUndefined } from "../../utils/typeChecks"; +import { hashPartitionKey } from "../../utils/hashing/hash"; +import { PartitionKey, PartitionKeyDefinition } from "../../documents"; /** * @hidden @@ -288,8 +287,8 @@ export class Items { const ref = new Item( this.container, (response.result as any).id, - partitionKey, - this.clientContext + this.clientContext, + partitionKey ); return new ItemResponse( response.result, @@ -360,8 +359,8 @@ export class Items { const ref = new Item( this.container, (response.result as any).id, - partitionKey, - this.clientContext + this.clientContext, + partitionKey ); return new ItemResponse( response.result, @@ -408,7 +407,8 @@ export class Items { const { resources: partitionKeyRanges } = await this.container .readPartitionKeyRanges() .fetchAll(); - const { resource: definition } = await this.container.getPartitionKeyDefinition(); + const { resource } = await this.container.readPartitionKeyDefinition(); + const partitionDefinition = assertNotUndefined(resource, "PartitionKeyDefinition."); const batches: Batch[] = partitionKeyRanges.map((keyRange: PartitionKeyRange) => { return { min: keyRange.minInclusive, @@ -418,19 +418,8 @@ export class Items { operations: [], }; }); - operations - .map((operation) => decorateOperation(operation, definition, options)) - .forEach((operation: Operation, index: number) => { - const partitionProp = definition.paths[0].replace("/", ""); - const isV2 = definition.version && definition.version === 2; - const toHashKey = getPartitionKeyToHash(operation, partitionProp); - const hashed = isV2 ? hashV2PartitionKey(toHashKey) : hashV1PartitionKey(toHashKey); - const batchForKey = batches.find((batch: Batch) => { - return isKeyInRange(batch.min, batch.max, hashed); - }); - batchForKey.operations.push(operation); - batchForKey.indexes.push(index); - }); + + this.groupOperationsBasedOnPartitionKey(operations, partitionDefinition, options, batches); const path = getPathFromLink(this.container.url, ResourceType.item); @@ -460,7 +449,8 @@ export class Items { // partition key types as well since we don't support them, so for now we throw if (err.code === 410) { throw new Error( - "Partition key error. Either the partitions have split or an operation has an unsupported partitionKey type" + "Partition key error. Either the partitions have split or an operation has an unsupported partitionKey type" + + err.message ); } throw new Error(`Bulk request errored with: ${err.message}`); @@ -470,6 +460,43 @@ export class Items { return orderedResponses; } + /** + * Function to create batches based on partition key ranges. + * @param operations - operations to group + * @param partitionDefinition - PartitionKey definition of container. + * @param options - Request options for bulk request.
+ * @param batches - Groups to be filled with operations. + */ + private groupOperationsBasedOnPartitionKey( + operations: OperationInput[], + partitionDefinition: PartitionKeyDefinition, + options: RequestOptions | undefined, + batches: Batch[] + ) { + operations.forEach((operationInput, index: number) => { + const { operation, partitionKey } = prepareOperations( + operationInput, + partitionDefinition, + options + ); + const hashed = hashPartitionKey( + assertNotUndefined( + partitionKey, + "undefined value for PartitionKey not expected during grouping of bulk operations." + ), + partitionDefinition + ); + const batchForKey = assertNotUndefined( + batches.find((batch: Batch) => { + return isKeyInRange(batch.min, batch.max, hashed); + }), + "No suitable Batch found." + ); + batchForKey.operations.push(operation); + batchForKey.indexes.push(index); + }); + } + /** * Execute transactional batch operations on items. * @@ -499,7 +526,7 @@ export class Items { */ public async batch( operations: OperationInput[], - partitionKey: string = "[{}]", + partitionKey?: PartitionKey, options?: RequestOptions ): Promise> { operations.map((operation) => decorateBatchOperation(operation, options)); diff --git a/sdk/cosmosdb/cosmos/src/documents/PartitionKey.ts b/sdk/cosmosdb/cosmos/src/documents/PartitionKey.ts index be19cd7d496d..43bb0eafe83a 100644 --- a/sdk/cosmosdb/cosmos/src/documents/PartitionKey.ts +++ b/sdk/cosmosdb/cosmos/src/documents/PartitionKey.ts @@ -1,5 +1,50 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT license. -import { PartitionKeyDefinition } from "./PartitionKeyDefinition"; -export type PartitionKey = PartitionKeyDefinition | string | number | unknown; +import { NonePartitionKeyLiteral, NullPartitionKeyLiteral } from "./PartitionKeyInternal"; + +export type PartitionKey = PrimitivePartitionKeyValue | PrimitivePartitionKeyValue[]; + +/** + * A primitive Partition Key value. + */ +export type PrimitivePartitionKeyValue = + | string + | number + | boolean + | NullPartitionKeyType + | NonePartitionKeyType; + +/** + * The returned object represents a partition key value that allows creating and accessing items + * with a null value for the partition key. + */ +export type NullPartitionKeyType = null; + +/** + * The returned object represents a partition key value that allows creating and accessing items + * without a value for partition key + */ +export type NonePartitionKeyType = { + [K in any]: never; +}; + +/** + * Builder class for building PartitionKey. + */ +export class PartitionKeyBuilder { + readonly values: PrimitivePartitionKeyValue[] = []; + public addValue(value: string | boolean | number): PartitionKeyBuilder { + this.values.push(value); + return this; + } + public addNullValue(): void { + this.values.push(NullPartitionKeyLiteral); + } + public addNoneValue(): void { + this.values.push(NonePartitionKeyLiteral); + } + public build(): PartitionKey { + return [...this.values]; + } +} diff --git a/sdk/cosmosdb/cosmos/src/documents/PartitionKeyDefinition.ts b/sdk/cosmosdb/cosmos/src/documents/PartitionKeyDefinition.ts index 983bb837236c..b4186e04bc9c 100644 --- a/sdk/cosmosdb/cosmos/src/documents/PartitionKeyDefinition.ts +++ b/sdk/cosmosdb/cosmos/src/documents/PartitionKeyDefinition.ts @@ -1,5 +1,8 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT license. 
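+// Illustrative definition shapes (hypothetical paths, shown only as a sketch): a single-path hash definition looks like { paths: ["/key"] }, while a hierarchical definition looks like { paths: ["/tenantId", "/userId"], version: PartitionKeyDefinitionVersion.V2, kind: PartitionKeyKind.MultiHash }.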
+import { PartitionKeyDefinitionVersion } from "./PartitionKeyDefinitionVersion"; +import { PartitionKeyKind } from "./PartitionKeyKind"; + export interface PartitionKeyDefinition { /** * An array of paths for which data within the collection can be partitioned. Paths must not contain a wildcard or @@ -11,6 +14,10 @@ export interface PartitionKeyDefinition { * An optional field, if not specified the default value is 1. To use the large partition key set the version to 2. * To learn about large partition keys, see [how to create containers with large partition key](https://docs.microsoft.com/en-us/azure/cosmos-db/large-partition-keys) article. */ - version?: number; + version?: PartitionKeyDefinitionVersion; systemKey?: boolean; + /** + * What kind of partition key is being defined (default: "Hash") + */ + kind?: PartitionKeyKind; } diff --git a/sdk/cosmosdb/cosmos/src/documents/PartitionKeyDefinitionVersion.ts b/sdk/cosmosdb/cosmos/src/documents/PartitionKeyDefinitionVersion.ts new file mode 100644 index 000000000000..9021aff9dd21 --- /dev/null +++ b/sdk/cosmosdb/cosmos/src/documents/PartitionKeyDefinitionVersion.ts @@ -0,0 +1,9 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +/** + * PartitionKey Definition Version + */ +export enum PartitionKeyDefinitionVersion { + V1 = 1, + V2 = 2, +} diff --git a/sdk/cosmosdb/cosmos/src/documents/PartitionKeyInternal.ts b/sdk/cosmosdb/cosmos/src/documents/PartitionKeyInternal.ts new file mode 100644 index 000000000000..9c4267309fea --- /dev/null +++ b/sdk/cosmosdb/cosmos/src/documents/PartitionKeyInternal.ts @@ -0,0 +1,35 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import { + NonePartitionKeyType, + NullPartitionKeyType, + PartitionKey, + PrimitivePartitionKeyValue, +} from "./PartitionKey"; + +/** + * @hidden + * Internal representation of a Partition Key. TODO: Make sure {@link ClientContext} works only with {@link PartitionKeyInternal} + */ +export type PartitionKeyInternal = PrimitivePartitionKeyValue[]; +/** + * @hidden + * None PartitionKey Literal + */ +export const NonePartitionKeyLiteral: NonePartitionKeyType = {}; +/** + * @hidden + * Null PartitionKey Literal + */ +export const NullPartitionKeyLiteral: NullPartitionKeyType = null; +/** + * @hidden + * Maps PartitionKey to InternalPartitionKey. + * @param partitionKey - PartitionKey to be converted. + * @returns PartitionKeyInternal + */ +export function convertToInternalPartitionKey(partitionKey: PartitionKey): PartitionKeyInternal { + if (Array.isArray(partitionKey)) return partitionKey; + else return [partitionKey]; +} diff --git a/sdk/cosmosdb/cosmos/src/documents/PartitionKeyKind.ts b/sdk/cosmosdb/cosmos/src/documents/PartitionKeyKind.ts new file mode 100644 index 000000000000..b8303e237dc3 --- /dev/null +++ b/sdk/cosmosdb/cosmos/src/documents/PartitionKeyKind.ts @@ -0,0 +1,9 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +/** + * Type of PartitionKey i.e.
Hash, MultiHash + */ +export enum PartitionKeyKind { + Hash = "Hash", + MultiHash = "MultiHash", +} diff --git a/sdk/cosmosdb/cosmos/src/documents/index.ts b/sdk/cosmosdb/cosmos/src/documents/index.ts index 0c6afe336af8..46df0c1110b4 100644 --- a/sdk/cosmosdb/cosmos/src/documents/index.ts +++ b/sdk/cosmosdb/cosmos/src/documents/index.ts @@ -10,6 +10,9 @@ export * from "./IndexingMode"; export * from "./IndexingPolicy"; export * from "./IndexKind"; export * from "./PartitionKey"; +export * from "./PartitionKeyInternal"; +export * from "./PartitionKeyDefinitionVersion"; +export * from "./PartitionKeyKind"; export * from "./PartitionKeyDefinition"; export * from "./PermissionMode"; export * from "./TriggerOperation"; diff --git a/sdk/cosmosdb/cosmos/src/extractPartitionKey.ts b/sdk/cosmosdb/cosmos/src/extractPartitionKey.ts index e4ef2c425742..66cdec2d8a01 100644 --- a/sdk/cosmosdb/cosmos/src/extractPartitionKey.ts +++ b/sdk/cosmosdb/cosmos/src/extractPartitionKey.ts @@ -1,47 +1,78 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT license. +import { AzureLogger, createClientLogger } from "@azure/logger"; import { parsePath } from "./common"; -import { PartitionKey, PartitionKeyDefinition } from "./documents"; +import { + NonePartitionKeyLiteral, + NullPartitionKeyLiteral, + PartitionKeyDefinition, + PartitionKeyInternal, + PrimitivePartitionKeyValue, +} from "./documents"; + +const logger: AzureLogger = createClientLogger("extractPartitionKey"); /** + * Function to extract PartitionKey based on {@link PartitionKeyDefinition} + * from an object. + * Returns + * 1. PartitionKeyInternal, if extraction is successful. + * 2. undefined, if either {@link partitionKeyDefinition} is not well formed + * or an unsupported partition key type is encountered.
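+ * + * A minimal sketch of the expected mapping (hypothetical document and definition, assuming a container partitioned on "/key"): extractPartitionKey({ id: "1", key: "A" }, { paths: ["/key"] }) yields ["A"], while extractPartitionKey({ id: "1" }, { paths: ["/key"] }) yields [{}], i.e. [NonePartitionKeyLiteral].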
* @hidden */ export function extractPartitionKey( document: unknown, - partitionKeyDefinition: PartitionKeyDefinition -): PartitionKey[] { + partitionKeyDefinition?: PartitionKeyDefinition +): PartitionKeyInternal | undefined { if ( partitionKeyDefinition && partitionKeyDefinition.paths && partitionKeyDefinition.paths.length > 0 ) { - const partitionKey: PartitionKey[] = []; + const partitionKeys: PrimitivePartitionKeyValue[] = []; partitionKeyDefinition.paths.forEach((path: string) => { - const pathParts = parsePath(path); + const pathParts: string[] = parsePath(path); let obj = document; for (const part of pathParts) { - if (typeof obj === "object" && part in obj) { + if (typeof obj === "object" && obj !== null && part in obj) { obj = (obj as Record)[part]; } else { obj = undefined; break; } } - partitionKey.push(obj); + if (typeof obj === "string" || typeof obj === "number" || typeof obj === "boolean") { + partitionKeys.push(obj); + } else if (obj === NullPartitionKeyLiteral) { + partitionKeys.push(NullPartitionKeyLiteral); + } else if ( + obj === undefined || + JSON.stringify(obj) === JSON.stringify(NonePartitionKeyLiteral) + ) { + if (partitionKeyDefinition.systemKey === true) { + return []; + } + partitionKeys.push(NonePartitionKeyLiteral); + } else { + logger.warning("Unsupported PartitionKey found."); + return undefined; + } }); - if (partitionKey.length === 1 && partitionKey[0] === undefined) { - return undefinedPartitionKey(partitionKeyDefinition); - } - return partitionKey; + return partitionKeys; } + logger.warning("Unexpected Partition Key Definition Found."); + return undefined; } /** * @hidden */ -export function undefinedPartitionKey(partitionKeyDefinition: PartitionKeyDefinition): unknown[] { +export function undefinedPartitionKey( + partitionKeyDefinition: PartitionKeyDefinition +): PartitionKeyInternal { if (partitionKeyDefinition.systemKey === true) { return []; } else { - return [{}]; + return partitionKeyDefinition.paths.map(() => NonePartitionKeyLiteral); } } diff --git a/sdk/cosmosdb/cosmos/src/request/FeedOptions.ts b/sdk/cosmosdb/cosmos/src/request/FeedOptions.ts index 5650bd4ebb5e..2e7e32fa9f2c 100644 --- a/sdk/cosmosdb/cosmos/src/request/FeedOptions.ts +++ b/sdk/cosmosdb/cosmos/src/request/FeedOptions.ts @@ -1,5 +1,6 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT license. +import { PartitionKey } from "../documents"; import { SharedOptions } from "./SharedOptions"; /** @@ -95,5 +96,5 @@ export interface FeedOptions extends SharedOptions { * The former is useful when the query body is out of your control * but you still want to restrict it to a single partition. Example: an end user specified query. 
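+ * + * A hedged usage sketch (the container reference and key value are assumptions): + * const querySpec = { query: "SELECT * FROM root r" }; // e.g. supplied by an end user + * await container.items.query(querySpec, { partitionKey: "A" }).fetchAll();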
*/ - partitionKey?: any; + partitionKey?: PartitionKey; } diff --git a/sdk/cosmosdb/cosmos/src/request/request.ts b/sdk/cosmosdb/cosmos/src/request/request.ts index 65a1e42daf74..b680398f8821 100644 --- a/sdk/cosmosdb/cosmos/src/request/request.ts +++ b/sdk/cosmosdb/cosmos/src/request/request.ts @@ -3,7 +3,7 @@ import { setAuthorizationHeader } from "../auth"; import { Constants, HTTPMethod, jsonStringifyAndEscapeNonASCII, ResourceType } from "../common"; import { CosmosClientOptions } from "../CosmosClientOptions"; -import { PartitionKey } from "../documents"; +import { PartitionKeyInternal } from "../documents"; import { CosmosHeaders } from "../queryExecutionContext"; import { FeedOptions, RequestOptions } from "./index"; import { defaultLogger } from "../common/logger"; @@ -41,7 +41,7 @@ interface GetHeadersOptions { options: RequestOptions & FeedOptions; partitionKeyRangeId?: string; useMultipleWriteLocations?: boolean; - partitionKey?: PartitionKey; + partitionKey?: PartitionKeyInternal; } const JsonContentType = "application/json"; @@ -168,9 +168,6 @@ export async function getHeaders({ } if (partitionKey !== undefined && !headers[Constants.HttpHeaders.PartitionKey]) { - if (partitionKey === null || !Array.isArray(partitionKey)) { - partitionKey = [partitionKey as string]; - } headers[Constants.HttpHeaders.PartitionKey] = jsonStringifyAndEscapeNonASCII(partitionKey); } diff --git a/sdk/cosmosdb/cosmos/src/utils/batch.ts b/sdk/cosmosdb/cosmos/src/utils/batch.ts index 537d2f2f3bad..d90a7dc01ae7 100644 --- a/sdk/cosmosdb/cosmos/src/utils/batch.ts +++ b/sdk/cosmosdb/cosmos/src/utils/batch.ts @@ -3,10 +3,17 @@ import { JSONObject } from "../queryExecutionContext"; import { extractPartitionKey } from "../extractPartitionKey"; -import { PartitionKeyDefinition } from "../documents"; +import { + NonePartitionKeyLiteral, + PartitionKey, + PartitionKeyDefinition, + PrimitivePartitionKeyValue, + convertToInternalPartitionKey, +} from "../documents"; import { RequestOptions } from ".."; import { PatchRequestBody } from "./patch"; import { v4 } from "uuid"; +import { assertNotUndefined } from "./typeChecks"; const uuid = v4; export type Operation = @@ -70,7 +77,7 @@ export type OperationInput = | PatchOperationInput; export interface CreateOperationInput { - partitionKey?: string | number | null | Record | undefined; + partitionKey?: PartitionKey; ifMatch?: string; ifNoneMatch?: string; operationType: typeof BulkOperationType.Create; @@ -78,7 +85,7 @@ export interface CreateOperationInput { } export interface UpsertOperationInput { - partitionKey?: string | number | null | Record | undefined; + partitionKey?: PartitionKey; ifMatch?: string; ifNoneMatch?: string; operationType: typeof BulkOperationType.Upsert; @@ -86,19 +93,19 @@ export interface UpsertOperationInput { } export interface ReadOperationInput { - partitionKey?: string | number | boolean | null | Record | undefined; + partitionKey?: PartitionKey; operationType: typeof BulkOperationType.Read; id: string; } export interface DeleteOperationInput { - partitionKey?: string | number | null | Record | undefined; + partitionKey?: PartitionKey; operationType: typeof BulkOperationType.Delete; id: string; } export interface ReplaceOperationInput { - partitionKey?: string | number | null | Record | undefined; + partitionKey?: PartitionKey; ifMatch?: string; ifNoneMatch?: string; operationType: typeof BulkOperationType.Replace; @@ -107,7 +114,7 @@ export interface ReplaceOperationInput { } export interface PatchOperationInput { - partitionKey?: 
string | number | null | Record<string, unknown> | undefined; + partitionKey?: PartitionKey; ifMatch?: string; ifNoneMatch?: string; operationType: typeof BulkOperationType.Patch; resourceBody: PatchRequestBody; } @@ -155,59 +162,77 @@ export function hasResource( (operation as OperationWithItem).resourceBody !== undefined ); } - -export function getPartitionKeyToHash(operation: Operation, partitionProperty: string): any { - const toHashKey = hasResource(operation) - ? deepFind(operation.resourceBody, partitionProperty) - : (operation.partitionKey && operation.partitionKey.replace(/[[\]"']/g, "")) || - operation.partitionKey; - // We check for empty object since replace will stringify the value - // The second check avoids cases where the partitionKey value is actually the string '{}' - if (toHashKey === "{}" && operation.partitionKey === "[{}]") { - return {}; - } - if (toHashKey === "null" && operation.partitionKey === "[null]") { - return null; - } - if (toHashKey === "0" && operation.partitionKey === "[0]") { - return 0; +/** + * Maps OperationInput to Operation by + * - generating Ids if needed. + * - choosing the partitionKey which is used to decide which batch this + * operation should be part of. The order is: + * 1. If the operationInput itself has the partitionKey field set, it is used. + * 2. Otherwise, for create/replace/upsert it is extracted from the resource body. + * 3. For read/delete/patch type operations an undefined partitionKey is used. + * - One nuance is that the partitionKey field inside Operation needs to + * be serialized as a JSON string. + * @param operationInput - OperationInput + * @param definition - PartitionKeyDefinition + * @param options - RequestOptions + * @returns + */ +export function prepareOperations( + operationInput: OperationInput, + definition: PartitionKeyDefinition, + options: RequestOptions = {} +): { + operation: Operation; + partitionKey: PrimitivePartitionKeyValue[]; +} { + populateIdsIfNeeded(operationInput, options); + + let partitionKey: PrimitivePartitionKeyValue[]; + if (Object.prototype.hasOwnProperty.call(operationInput, "partitionKey")) { + if (operationInput.partitionKey === undefined) { + partitionKey = definition.paths.map(() => NonePartitionKeyLiteral); + } else { + partitionKey = convertToInternalPartitionKey(operationInput.partitionKey); + } + } else { + switch (operationInput.operationType) { + case BulkOperationType.Create: + case BulkOperationType.Replace: + case BulkOperationType.Upsert: + partitionKey = assertNotUndefined( + extractPartitionKey(operationInput.resourceBody, definition), + "" + ); + break; + case BulkOperationType.Read: + case BulkOperationType.Delete: + case BulkOperationType.Patch: + partitionKey = definition.paths.map(() => NonePartitionKeyLiteral); + } } - return toHashKey; + return { + operation: { ...operationInput, partitionKey: JSON.stringify(partitionKey) } as Operation, + partitionKey, + }; } -export function decorateOperation( - operation: OperationInput, - definition: PartitionKeyDefinition, - options: RequestOptions = {} -): Operation { +/** + * For operations requiring an Id, generate random UUIDs. + * @param operationInput - OperationInput to be checked.
+ * @param options - RequestOptions + */ +function populateIdsIfNeeded(operationInput: OperationInput, options: RequestOptions) { if ( - operation.operationType === BulkOperationType.Create || - operation.operationType === BulkOperationType.Upsert + operationInput.operationType === BulkOperationType.Create || + operationInput.operationType === BulkOperationType.Upsert ) { if ( - (operation.resourceBody.id === undefined || operation.resourceBody.id === "") && + (operationInput.resourceBody.id === undefined || operationInput.resourceBody.id === "") && !options.disableAutomaticIdGeneration ) { - operation.resourceBody.id = uuid(); + operationInput.resourceBody.id = uuid(); } } - if ("partitionKey" in operation) { - const extracted = extractPartitionKey(operation, { paths: ["/partitionKey"] }); - return { ...operation, partitionKey: JSON.stringify(extracted) } as Operation; - } else if ( - operation.operationType === BulkOperationType.Create || - operation.operationType === BulkOperationType.Replace || - operation.operationType === BulkOperationType.Upsert - ) { - const pk = extractPartitionKey(operation.resourceBody, definition); - return { ...operation, partitionKey: JSON.stringify(pk) } as Operation; - } else if ( - operation.operationType === BulkOperationType.Read || - operation.operationType === BulkOperationType.Delete - ) { - return { ...operation, partitionKey: "[{}]" }; - } - return operation as Operation; } export function decorateBatchOperation( @@ -227,19 +252,3 @@ } return operation as Operation; } -/** - * Util function for finding partition key values nested in objects at slash (/) separated paths - * @hidden - */ -export function deepFind<T, P extends string>(document: T, path: P): string | JSONObject { - const apath = path.split("/"); - let h: any = document; - for (const p of apath) { - if (p in h) h = h[p]; - else { - console.warn(`Partition key not found, using undefined: ${path} at ${p}`); - return "{}"; - } - } - return h; -} diff --git a/sdk/cosmosdb/cosmos/src/utils/hashing/hash.ts b/sdk/cosmosdb/cosmos/src/utils/hashing/hash.ts new file mode 100644 index 000000000000..d6375ae14f50 --- /dev/null +++ b/sdk/cosmosdb/cosmos/src/utils/hashing/hash.ts @@ -0,0 +1,35 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import { + PartitionKeyDefinition, + PartitionKeyDefinitionVersion, + PartitionKeyKind, + PrimitivePartitionKeyValue, +} from "../../documents"; +import { hashMultiHashPartitionKey } from "./multiHash"; +import { hashV1PartitionKey } from "./v1"; +import { hashV2PartitionKey } from "./v2"; + +/** + * Generate the hash of a PartitionKey based on its PartitionKeyDefinition. + * @param partitionKey - to be hashed. + * @param partitionDefinition - container's partitionKey definition + * @returns + */ +export function hashPartitionKey( + partitionKey: PrimitivePartitionKeyValue[], + partitionDefinition: PartitionKeyDefinition +): string { + const kind: PartitionKeyKind = partitionDefinition?.kind || PartitionKeyKind.Hash; // Default value. + const isV2 = + partitionDefinition && + partitionDefinition.version && + partitionDefinition.version === PartitionKeyDefinitionVersion.V2; + switch (kind) { + case PartitionKeyKind.Hash: + return isV2 ?
hashV2PartitionKey(partitionKey) : hashV1PartitionKey(partitionKey); + case PartitionKeyKind.MultiHash: + return hashMultiHashPartitionKey(partitionKey); + } +} diff --git a/sdk/cosmosdb/cosmos/src/utils/hashing/multiHash.ts b/sdk/cosmosdb/cosmos/src/utils/hashing/multiHash.ts new file mode 100644 index 000000000000..5c03f80616ff --- /dev/null +++ b/sdk/cosmosdb/cosmos/src/utils/hashing/multiHash.ts @@ -0,0 +1,13 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +import { PrimitivePartitionKeyValue } from "../../documents"; +import { hashV2PartitionKey } from "./v2"; + +/** + * Generate Hash for a `Multi Hash` type partition. + * @param partitionKey - to be hashed. + * @returns + */ +export function hashMultiHashPartitionKey(partitionKey: PrimitivePartitionKeyValue[]): string { + return partitionKey.map((keys) => hashV2PartitionKey([keys])).join(""); +} diff --git a/sdk/cosmosdb/cosmos/src/utils/hashing/v1.ts b/sdk/cosmosdb/cosmos/src/utils/hashing/v1.ts index 66794b8a9924..0d433b1d4692 100644 --- a/sdk/cosmosdb/cosmos/src/utils/hashing/v1.ts +++ b/sdk/cosmosdb/cosmos/src/utils/hashing/v1.ts @@ -5,20 +5,21 @@ import { doubleToByteArrayJSBI, writeNumberForBinaryEncodingJSBI } from "./encod import { writeStringForBinaryEncoding } from "./encoding/string"; import { BytePrefix } from "./encoding/prefix"; import MurmurHash from "./murmurHash"; +import { PrimitivePartitionKeyValue } from "../../documents"; const MAX_STRING_CHARS = 100; -type v1Key = string | number | boolean | null | Record | undefined; - -export function hashV1PartitionKey(partitionKey: v1Key): string { - const toHash = prefixKeyByType(partitionKey); +export function hashV1PartitionKey(partitionKey: PrimitivePartitionKeyValue[]): string { + const key = partitionKey[0]; + const toHash = prefixKeyByType(key); const hash = MurmurHash.x86.hash32(toHash); const encodedJSBI = writeNumberForBinaryEncodingJSBI(hash); - const encodedValue = encodeByType(partitionKey); - return Buffer.concat([encodedJSBI, encodedValue]).toString("hex").toUpperCase(); + const encodedValue = encodeByType(key); + const finalHash = Buffer.concat([encodedJSBI, encodedValue]).toString("hex").toUpperCase(); + return finalHash; } -function prefixKeyByType(key: v1Key): Buffer { +function prefixKeyByType(key: PrimitivePartitionKeyValue): Buffer { let bytes: Buffer; switch (typeof key) { case "string": { @@ -53,7 +54,7 @@ function prefixKeyByType(key: v1Key): Buffer { } } -function encodeByType(key: v1Key): Buffer { +function encodeByType(key: PrimitivePartitionKeyValue): Buffer { switch (typeof key) { case "string": { const truncated = key.substr(0, MAX_STRING_CHARS); diff --git a/sdk/cosmosdb/cosmos/src/utils/hashing/v2.ts b/sdk/cosmosdb/cosmos/src/utils/hashing/v2.ts index d2138195ad7f..d873505df467 100644 --- a/sdk/cosmosdb/cosmos/src/utils/hashing/v2.ts +++ b/sdk/cosmosdb/cosmos/src/utils/hashing/v2.ts @@ -1,21 +1,20 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT license. 
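+// For reference, the spec fixtures later in this change pin the expected output, e.g. +// hashV2PartitionKey(["redmond"]) is expected to produce "22E342F38A486A088463DFF7838A5963".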
+import { PrimitivePartitionKeyValue } from "../../documents"; import { doubleToByteArrayJSBI } from "./encoding/number"; import { BytePrefix } from "./encoding/prefix"; import MurmurHash from "./murmurHash"; -type v2Key = string | number | boolean | null | Record<string, unknown> | undefined; - -export function hashV2PartitionKey(partitionKey: v2Key): string { - const toHash = prefixKeyByType(partitionKey); +export function hashV2PartitionKey(partitionKey: PrimitivePartitionKeyValue[]): string { + const toHash: Buffer = Buffer.concat(partitionKey.map(prefixKeyByType)); const hash = MurmurHash.x64.hash128(toHash); const reverseBuff: Buffer = reverse(Buffer.from(hash, "hex")); reverseBuff[0] &= 0x3f; return reverseBuff.toString("hex").toUpperCase(); } -function prefixKeyByType(key: v2Key): Buffer { +function prefixKeyByType(key: PrimitivePartitionKeyValue): Buffer { let bytes: Buffer; switch (typeof key) { case "string": { diff --git a/sdk/cosmosdb/cosmos/src/utils/typeChecks.ts b/sdk/cosmosdb/cosmos/src/utils/typeChecks.ts new file mode 100644 index 000000000000..e10fe83a47e0 --- /dev/null +++ b/sdk/cosmosdb/cosmos/src/utils/typeChecks.ts @@ -0,0 +1,21 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +/** + * A type which can be any type except undefined + */ +export type NonUndefinable<T> = T extends undefined ? never : T; + +/** + * Utility function to avoid writing boilerplate code while checking for + * undefined values. It throws an Error if the input value is undefined. + * @param value - Value which is potentially undefined. + * @param msg - Error Message to throw if value is undefined. + * @returns + */ +export function assertNotUndefined<T>(value: T, msg?: string): NonUndefinable<T> { + if (value !== undefined) { + return value as NonUndefinable<T>; + } + throw new Error(msg || "Unexpected 'undefined' value encountered"); +} diff --git a/sdk/cosmosdb/cosmos/test/internal/unit/hashing/v1.spec.ts b/sdk/cosmosdb/cosmos/test/internal/unit/hashing/v1.spec.ts index 5a72780dfe53..dd2a1fdc0377 100644 --- a/sdk/cosmosdb/cosmos/test/internal/unit/hashing/v1.spec.ts +++ b/sdk/cosmosdb/cosmos/test/internal/unit/hashing/v1.spec.ts @@ -8,56 +8,58 @@ describe("effectivePartitionKey", function () { describe("computes v1 key", function () { const toMatch = [ { - key: "partitionKey", + key: ["partitionKey"], output: "05C1E1B3D9CD2608716273756A756A706F4C667A00", }, { - key: "redmond", + key: ["redmond"], output: "05C1EFE313830C087366656E706F6500", }, { - key: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + key: [ + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + ], output: "05C1EB5921F706086262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626262626200", }, { - key: "", + key: [""], output: "05C1CF33970FF80800", }, { - key: "aa", + key: ["aa"], output: "05C1C7B7270FE008626200", }, { - key: null, + key: [null], output: "05C1ED45D7475601", }, { - key: true, + key: [true], output: "05C1D7C5A903D803", }, { - key: false, + key: [false], output: "05C1DB857D857C02", }, { - key: {}, + key: [{}], output: "05C1D529E345DC00", }, { - key: 5, + key: [5], output: "05C1D9C1C5517C05C014", }, { - key: 5.5, + key: [5.5], output: "05C1D7A771716C05C016", }, { - key: 12313.1221, + key: [12313.1221], output: "05C1ED154D592E05C0C90723F50FC925D8", },
{ - key: 123456789, + key: [123456789], output: "05C1D9E1A5311C05C19DB7CD8B40", }, ]; diff --git a/sdk/cosmosdb/cosmos/test/internal/unit/hashing/v2.spec.ts b/sdk/cosmosdb/cosmos/test/internal/unit/hashing/v2.spec.ts index b1634cfc3506..62e8f933799c 100644 --- a/sdk/cosmosdb/cosmos/test/internal/unit/hashing/v2.spec.ts +++ b/sdk/cosmosdb/cosmos/test/internal/unit/hashing/v2.spec.ts @@ -8,51 +8,53 @@ describe("effectivePartitionKey", function () { describe("computes v2 key", function () { const toMatch = [ { - key: "redmond", + key: ["redmond"], output: "22E342F38A486A088463DFF7838A5963", }, { - key: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + key: [ + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + ], output: "0BA3E9CA8EE4C14538828D1612A4B652", }, { - key: "", + key: [""], output: "32E9366E637A71B4E710384B2F4970A0", }, { - key: "aa", + key: ["aa"], output: "05033626483AE80D00E44FBD35362B19", }, { - key: null, + key: [null], output: "378867E4430E67857ACE5C908374FE16", }, { - key: true, + key: [true], output: "0E711127C5B5A8E4726AC6DD306A3E59", }, { - key: false, + key: [false], output: "2FE1BE91E90A3439635E0E9E37361EF2", }, { - key: {}, + key: [{}], output: "11622DAA78F835834610ABE56EFF5CB5", }, { - key: 5, + key: [5], output: "19C08621B135968252FB34B4CF66F811", }, { - key: 5.5, + key: [5.5], output: "0E2EE47829D1AF775EEFB6540FD1D0ED", }, { - key: 12313.1221, + key: [12313.1221], output: "27E7ECA8F2EE3E53424DE8D5220631C6", }, { - key: 123456789, + key: [123456789], output: "1F56D2538088EBA82CCF988F36E16760", }, ]; diff --git a/sdk/cosmosdb/cosmos/test/internal/unit/utils/batch.spec.ts b/sdk/cosmosdb/cosmos/test/internal/unit/utils/batch.spec.ts deleted file mode 100644 index f9af90f73958..000000000000 --- a/sdk/cosmosdb/cosmos/test/internal/unit/utils/batch.spec.ts +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. - -import assert from "assert"; -import { deepFind } from "../../../../src/utils/batch"; - -describe("batch utils", function () { - it("deep finds nested partition key values in objects", function () { - const testTwiceNested = { - nested: { - nested2: { - key: "value", - }, - }, - }; - const testNested = { - nested: { - key: "value", - }, - }; - const testBase = { - key: "value", - }; - assert.equal(deepFind(testNested, "nested/key"), "value"); - assert.equal(deepFind(testBase, "key"), "value"); - assert.equal(deepFind(testTwiceNested, "nested/nested2/key"), "value"); - }); -}); diff --git a/sdk/cosmosdb/cosmos/test/public/common/TestHelpers.ts b/sdk/cosmosdb/cosmos/test/public/common/TestHelpers.ts index 388992e35b66..4b11c6ecc94a 100644 --- a/sdk/cosmosdb/cosmos/test/public/common/TestHelpers.ts +++ b/sdk/cosmosdb/cosmos/test/public/common/TestHelpers.ts @@ -6,6 +6,9 @@ import { CosmosClient, Database, DatabaseDefinition, + extractPartitionKey, + PartitionKey, + PartitionKeyDefinition, PermissionDefinition, RequestOptions, Response, @@ -101,13 +104,11 @@ export async function bulkInsertItems( export async function bulkReadItems( container: Container, documents: any[], - partitionKeyProperty: string + partitionKeyDef: PartitionKeyDefinition ): Promise { return Promise.all( documents.map(async (document) => { - const partitionKey = Object.prototype.hasOwnProperty.call(document, partitionKeyProperty) - ? 
document[partitionKeyProperty] - : undefined; + const partitionKey = extractPartitionKey(document, partitionKeyDef); // TODO: should we block or do all requests in parallel? const { resource: doc } = await container.item(document.id, partitionKey).read(); @@ -119,13 +120,11 @@ export async function bulkReadItems( export async function bulkReplaceItems( container: Container, documents: any[], - partitionKeyProperty: string + partitionKeyDef: PartitionKeyDefinition ): Promise { return Promise.all( documents.map(async (document) => { - const partitionKey = Object.prototype.hasOwnProperty.call(document, partitionKeyProperty) - ? document[partitionKeyProperty] - : undefined; + const partitionKey = extractPartitionKey(document, partitionKeyDef); const { resource: doc } = await container.item(document.id, partitionKey).replace(document); const { _etag: _1, _ts: _2, ...expectedModifiedDocument } = document; // eslint-disable-line @typescript-eslint/no-unused-vars const { _etag: _4, _ts: _3, ...actualModifiedDocument } = doc; // eslint-disable-line @typescript-eslint/no-unused-vars @@ -138,13 +137,11 @@ export async function bulkReplaceItems( export async function bulkDeleteItems( container: Container, documents: any[], - partitionKeyProperty: string + partitionKeyDef: PartitionKeyDefinition ): Promise { await Promise.all( documents.map(async (document) => { - const partitionKey = Object.prototype.hasOwnProperty.call(document, partitionKeyProperty) - ? document[partitionKeyProperty] - : undefined; + const partitionKey = extractPartitionKey(document, partitionKeyDef); await container.item(document.id, partitionKey).delete(); }) @@ -154,25 +151,31 @@ export async function bulkDeleteItems( export async function bulkQueryItemsWithPartitionKey( container: Container, documents: any[], - partitionKeyPropertyName: string + query: string, + parameterGenerator: (doc: any) => { name: string; value: any }[] ): Promise { for (const document of documents) { - if (!Object.prototype.hasOwnProperty.call(document, partitionKeyPropertyName)) { + const parameters = parameterGenerator(document); + const shouldSkip = parameters.reduce( + (previous, current) => previous || current["value"] === undefined, + false + ); + if (shouldSkip) { continue; } - const querySpec = { - query: "SELECT * FROM root r WHERE r." 
+ partitionKeyPropertyName + "=@key", - parameters: [ - { - name: "@key", - value: document[partitionKeyPropertyName], - }, - ], + query: query, + parameters: parameters, }; const { resources } = await container.items.query(querySpec).fetchAll(); - assert.equal(resources.length, 1, "Expected exactly 1 document"); + assert.equal( + resources.length, + 1, + `Expected exactly 1 document, doc: ${JSON.stringify( + document + )}, query: '${query}', parameters: ${JSON.stringify(parameters)}` + ); assert.equal(JSON.stringify(resources[0]), JSON.stringify(document)); } } @@ -195,13 +198,14 @@ export async function replaceOrUpsertItem( container: Container, body: unknown, options: RequestOptions, - isUpsertTest: boolean + isUpsertTest: boolean, + partitionKey?: PartitionKey ): Promise> { if (isUpsertTest) { return container.items.upsert(body, options); } else { const bodyWithId = body as { id: string }; - return container.item(bodyWithId.id, undefined).replace(body, options); + return container.item(bodyWithId.id, partitionKey).replace(body, options); } } diff --git a/sdk/cosmosdb/cosmos/test/public/functional/item.spec.ts b/sdk/cosmosdb/cosmos/test/public/functional/item.spec.ts index c9c0a4884ea5..95c0d58ddcb6 100644 --- a/sdk/cosmosdb/cosmos/test/public/functional/item.spec.ts +++ b/sdk/cosmosdb/cosmos/test/public/functional/item.spec.ts @@ -3,11 +3,15 @@ import assert from "assert"; import { Suite } from "mocha"; import { + BulkOptions, Container, + ContainerDefinition, + ContainerRequest, CosmosClient, OperationResponse, PatchOperation, PatchOperationType, + RequestOptions, } from "../../../src"; import { ItemDefinition } from "../../../src"; import { @@ -26,58 +30,205 @@ import { BulkOperationType, OperationInput } from "../../../src"; import { endpoint } from "../common/_testConfig"; import { masterKey } from "../common/_fakeTestSecrets"; +import { + PartitionKey, + PartitionKeyDefinition, + PartitionKeyDefinitionVersion, + PartitionKeyKind, +} from "../../../src/documents"; + +/** + * Tests the Item API. + * Nomenclature: + * V1 Container - containerDefinition.partitionKey.version is undefined or 1 + * V2 Container - containerDefinition.partitionKey.version is 2 + * Single Partition Container - Container with only one physical partition. + * Multi Partition Container - Container with more than one physical partition. + * Hierarchical Partition Container - Container with more than one level of hierarchy of Partition Keys, e.g. ['key1', 'key2'] + * Nested Partition Key - Partition Key composed of a value which is nested in the document, e.g. ['/Address/Zip'] + */ interface TestItem { id?: string; name?: string; foo?: string; - key?: string; + key?: string | number | boolean; + key2?: string | number | boolean; replace?: string; + nested1?: { + nested2: { + nested3: string | number | boolean; + }; + }; + prop?: number; } -describe("Item CRUD", function (this: Suite) { - this.timeout(process.env.MOCHA_TIMEOUT || 10000); +type CRUDTestDataSet = { + // Container to create.
+ containerDef: ContainerDefinition; + // item to create + itemDef: TestItem; + // item to replace it with + replaceItemDef: TestItem; + // Partition key to use for operations on original item + originalItemPartitionKey?: PartitionKey; + // Partition key to use for operations on replaced item + replacedItemPartitionKey?: PartitionKey; + propertyToCheck?: string[]; +}; + +function extractNestedPropertyFromObject(obj: any, paths: string[] = []) { + return paths.reduce((ob: any, path: string) => { + if (ob !== null && ob !== undefined && typeof ob === "object") { + return ob[path]; + } else { + throw new Error(`The property ${path} doesn't exist in the object`); + } + }, obj); +} + +type MultiCRUDTestDataSet = { + dbName: string; + containerDef: ContainerDefinition; + partitinKeyDef: PartitionKeyDefinition; + containerRequestOps: RequestOptions; + documents: TestItem[]; + singleDocFetchQuery: string; + parameterGenerator: (doc: any) => { name: string; value: any }[]; +}; + +/** + * Helper function to run Create, Upsert, Read, Update, Replace, Delete operations on Items. + * @param dataset - CRUDTestDataSet + * @param isUpsertTest - whether upsert is to be tested instead of create. + */ +async function CRUDTestRunner(dataset: CRUDTestDataSet, isUpsertTest: boolean): Promise<void> { + // create database + const database = await getTestDatabase("sample 中文 database"); + // create container + const { resource: containerdef } = await database.containers.create(dataset.containerDef); + const container: Container = database.container(containerdef.id); + + // read items on empty container + const { resources: items } = await container.items.readAll().fetchAll(); + assert(Array.isArray(items), "Value should be an array"); + + // create an item + const beforeCreateDocumentsCount = items.length; + + const itemDefinition = dataset.itemDef; + try { + await createOrUpsertItem( + container, + itemDefinition, + { disableAutomaticIdGeneration: true }, + isUpsertTest + ); + assert.fail("id generation disabled must throw with invalid id"); + } catch (err: any) { + assert(err !== undefined, "should throw an error because automatic id generation is disabled"); + } + + // create or upsert + const { resource: document } = await createOrUpsertItem( + container, + itemDefinition, + undefined, + isUpsertTest + ); + assert.equal( + extractNestedPropertyFromObject(document, dataset.propertyToCheck), + extractNestedPropertyFromObject(itemDefinition, dataset.propertyToCheck) + ); + assert(document.id !== undefined); + // read documents after creation + const { resources: documents2 } = await container.items.readAll().fetchAll(); + assert.equal( + documents2.length, + beforeCreateDocumentsCount + 1, + "create should increase the number of documents" + ); + // query documents + const querySpec = { + query: "SELECT * FROM root r WHERE r.id=@id", + parameters: [ + { + name: "@id", + value: document.id, + }, + ], + }; + const { resources: results } = await container.items.query(querySpec).fetchAll(); + assert(results.length > 0, "number of results for the query should be > 0"); + const { resources: results2 } = await container.items.query(querySpec).fetchAll(); + assert(results2.length > 0, "number of results for the query should be > 0"); + + // replace document + const replaceDocument: TestItem = { ...dataset.replaceItemDef, id: document.id }; + const { resource: replacedDocument } = await replaceOrUpsertItem( + container, + replaceDocument, + undefined, + isUpsertTest, + dataset.originalItemPartitionKey + ); + assert.equal(replacedDocument.name,
replaceDocument.name, "document name property should change"); + assert.equal(replacedDocument.foo, replaceDocument.foo, "property should have changed"); + assert.equal(document.id, replacedDocument.id, "document id should stay the same"); + // read document + const response2 = await container + .item(replacedDocument.id, dataset.replacedItemPartitionKey) + .read(); + const document2 = response2.resource; + assert.equal(replacedDocument.id, document2.id); + assert.equal(typeof response2.requestCharge, "number"); + // delete document + await container.item(replacedDocument.id, dataset.replacedItemPartitionKey).delete(); + + // read documents after deletion + const response = await container.item(replacedDocument.id, undefined).read(); + assert.equal(response.statusCode, 404, "response should return error code 404"); + assert.equal(response.resource, undefined); + + // update document +} + +describe("Item CRUD hierarchical partition", function (this: Suite) { beforeEach(async function () { await removeAllDatabases(); }); - const documentCRUDTest = async function (isUpsertTest: boolean): Promise { - // create database - const database = await getTestDatabase("sample 中文 database"); - // create container - const { resource: containerdef } = await database.containers.create({ id: "sample container" }); + it("hierarchical partitions", async function () { + const dbName = "hierarchical partition db"; + const database = await getTestDatabase(dbName); + const containerDef = { + id: "sample container", + partitionKey: { + paths: ["/foo", "/key"], + version: PartitionKeyDefinitionVersion.V2, + kind: PartitionKeyKind.MultiHash, + }, + }; + const itemDefinition: TestItem = { + name: "sample document", + foo: "bar", + key: "value", + replace: "new property", + }; + + const { resource: containerdef } = await database.containers.create(containerDef); const container: Container = database.container(containerdef.id); // read items const { resources: items } = await container.items.readAll().fetchAll(); assert(Array.isArray(items), "Value should be an array"); - // create an item const beforeCreateDocumentsCount = items.length; - const itemDefinition: TestItem = { - name: "sample document", - foo: "bar", - key: "value", - replace: "new property", - }; - try { - await createOrUpsertItem( - container, - itemDefinition, - { disableAutomaticIdGeneration: true }, - isUpsertTest - ); - assert.fail("id generation disabled must throw with invalid id"); - } catch (err: any) { - assert( - err !== undefined, - "should throw an error because automatic id generation is disabled" - ); - } + const { resource: document } = await createOrUpsertItem( container, itemDefinition, undefined, - isUpsertTest + false ); assert.equal(document.name, itemDefinition.name); assert(document.id !== undefined); @@ -88,92 +239,29 @@ describe("Item CRUD", function (this: Suite) { beforeCreateDocumentsCount + 1, "create should increase the number of documents" ); - // query documents - const querySpec = { - query: "SELECT * FROM root r WHERE r.id=@id", - parameters: [ - { - name: "@id", - value: document.id, - }, - ], - }; - const { resources: results } = await container.items.query(querySpec).fetchAll(); - assert(results.length > 0, "number of results for the query should be > 0"); - const { resources: results2 } = await container.items.query(querySpec).fetchAll(); - assert(results2.length > 0, "number of results for the query should be > 0"); - - // replace document - document.name = "replaced document"; - document.foo = "not bar"; - const {
resource: replacedDocument } = await replaceOrUpsertItem( - container, - document, - undefined, - isUpsertTest - ); - assert.equal( - replacedDocument.name, - "replaced document", - "document name property should change" - ); - assert.equal(replacedDocument.foo, "not bar", "property should have changed"); - assert.equal(document.id, replacedDocument.id, "document id should stay the same"); - // read document - const response2 = await container.item(replacedDocument.id, undefined).read(); - const document2 = response2.resource; - assert.equal(replacedDocument.id, document2.id); - assert.equal(typeof response2.requestCharge, "number"); - // delete document - await container.item(replacedDocument.id, undefined).delete(); - - // read documents after deletion - const response = await container.item(replacedDocument.id, undefined).read(); - assert.equal(response.statusCode, 404, "response should return error code 404"); - assert.equal(response.resource, undefined); - }; - - it("Should do document CRUD operations successfully", async function () { - await documentCRUDTest(false); }); +}); - it("Should do document CRUD operations successfully with upsert", async function () { - await documentCRUDTest(true); +describe("Create, Upsert, Read, Update, Replace, Delete Operations on Item", function (this: Suite) { + this.timeout(process.env.MOCHA_TIMEOUT || 10000); + beforeEach(async function () { + await removeAllDatabases(); }); - it("Should do document CRUD operations over multiple partitions", async function () { - // create database - const database = await getTestDatabase("db1"); - const partitionKey = "key"; - - // create container - const containerDefinition = { - id: "coll1", - partitionKey: { paths: ["/" + partitionKey] }, - }; - - const { resource: containerdef } = await database.containers.create(containerDefinition, { - offerThroughput: 12000, - }); + async function multiplePartitionCRUDTest(dataset: MultiCRUDTestDataSet): Promise<void> { + const database = await getTestDatabase(dataset.dbName); + const { resource: containerdef } = await database.containers.create( + { ...dataset.containerDef, partitionKey: dataset.partitinKeyDef }, + dataset.containerRequestOps + ); const container = database.container(containerdef.id); + let returnedDocuments = await bulkInsertItems(container, dataset.documents); - const documents = [ - { id: "document1" }, - { id: "document2", key: null, prop: 1 }, - { id: "document3", key: false, prop: 1 }, - { id: "document4", key: true, prop: 1 }, - { id: "document5", key: 1, prop: 1 }, - { id: "document6", key: "A", prop: 1 }, - { id: "document7", key: "", prop: 1 }, - ]; - - let returnedDocuments = await bulkInsertItems(container, documents); - - assert.equal(returnedDocuments.length, documents.length); + assert.equal(returnedDocuments.length, dataset.documents.length); returnedDocuments.sort(function (doc1, doc2) { return doc1.id.localeCompare(doc2.id); }); - await bulkReadItems(container, returnedDocuments, partitionKey); + await bulkReadItems(container, returnedDocuments, dataset.partitinKeyDef); const { resources: successDocuments } = await container.items.readAll().fetchAll(); assert(successDocuments !== undefined, "error reading documents"); assert.equal( @@ -189,13 +277,21 @@ describe("Item CRUD", function (this: Suite) { JSON.stringify(returnedDocuments), "Unexpected documents are returned" ); - returnedDocuments.forEach(function (document) { document.prop ?
++document.prop : null; // eslint-disable-line no-unused-expressions }); - const newReturnedDocuments = await bulkReplaceItems(container, returnedDocuments, partitionKey); + const newReturnedDocuments = await bulkReplaceItems( + container, + returnedDocuments, + dataset.partitinKeyDef + ); returnedDocuments = newReturnedDocuments; - await bulkQueryItemsWithPartitionKey(container, returnedDocuments, partitionKey); + await bulkQueryItemsWithPartitionKey( + container, + returnedDocuments, + dataset.singleDocFetchQuery, + dataset.parameterGenerator + ); const querySpec = { query: "SELECT * FROM Root", }; @@ -217,520 +313,1143 @@ describe("Item CRUD", function (this: Suite) { "Unexpected query results" ); - await bulkDeleteItems(container, returnedDocuments, partitionKey); - }); - - it("Should auto generate an id for a collection partitioned on id", async function () { - // https://github.com/Azure/azure-sdk-for-js/issues/9734 - const container = await getTestContainer("db1", undefined, { partitionKey: "/id" }); - const { resource } = await container.items.create({}); - assert.ok(resource.id); - }); -}); - -describe("bulk/batch item operations", function () { - describe("with v1 container", function () { - let container: Container; - let readItemId: string; - let replaceItemId: string; - let deleteItemId: string; - before(async function () { - container = await getTestContainer("bulk container", undefined, { - partitionKey: { - paths: ["/key"], - version: undefined, - }, - throughput: 25100, - }); - readItemId = addEntropy("item1"); - await container.items.create({ - id: readItemId, - key: "A", - class: "2010", - }); - deleteItemId = addEntropy("item2"); - await container.items.create({ - id: deleteItemId, - key: "A", - class: "2010", - }); - replaceItemId = addEntropy("item3"); - await container.items.create({ - id: replaceItemId, - key: 5, - class: "2010", - }); - }); - after(async () => { - await container.database.delete(); - }); - it("handles create, upsert, replace, delete", async function () { - const operations = [ - { - operationType: BulkOperationType.Create, - resourceBody: { id: addEntropy("doc1"), name: "sample", key: "A" }, - }, + await bulkDeleteItems(container, returnedDocuments, dataset.partitinKeyDef); + } + const dataSetForDefaultPartitionKey: CRUDTestDataSet = { + containerDef: { id: "sample container" }, + itemDef: { + name: "sample document", + foo: "bar", + key: "value", + replace: "new property", + }, + replaceItemDef: { + name: "replaced document", + foo: "not bar", + key: "value", + replace: "new property", + }, + originalItemPartitionKey: undefined, + replacedItemPartitionKey: undefined, + propertyToCheck: ["name"], + }; + const dataSetForHierarchicalPartitionKey: CRUDTestDataSet = { + containerDef: { + id: "sample container", + partitionKey: { + paths: ["/key", "/key2"], + version: PartitionKeyDefinitionVersion.V2, + kind: PartitionKeyKind.MultiHash, + }, + }, + itemDef: { + name: "sample document", + foo: "bar", + key2: "value2", + key: "value", + replace: "new property", + }, + replaceItemDef: { + name: "replaced document", + foo: "not bar", + key2: "value2", + key: "value", + replace: "new property", + }, + originalItemPartitionKey: ["value", "value2"], + replacedItemPartitionKey: ["value", "value2"], + }; + const multiCrudDataset1: MultiCRUDTestDataSet = { + dbName: "db1", + partitinKeyDef: { + paths: ["/key"], + }, + containerDef: { + id: "col1", + }, + documents: [ + { id: "document1" }, + { id: "document2", key: null, key2: null, prop: 1 }, + { id: 
"document3", key: false, key2: false, prop: 1 }, + { id: "document4", key: true, key2: true, prop: 1 }, + { id: "document5", key: 1, key2: 1, prop: 1 }, + { id: "document6", key: "A", key2: "A", prop: 1 }, + { id: "document7", key: "", key2: "", prop: 1 }, + ], + containerRequestOps: { + offerThroughput: 12000, + }, + singleDocFetchQuery: "SELECT * FROM root r WHERE r.key=@key", + parameterGenerator: (doc: any) => { + return [ { - operationType: BulkOperationType.Upsert, - partitionKey: "A", - resourceBody: { id: addEntropy("doc2"), name: "other", key: "A" }, - }, - { - operationType: BulkOperationType.Read, - id: readItemId, - partitionKey: "A", + name: "@key", + value: doc["key"], }, + ]; + }, + }; + const multiCrudDatasetWithHierarchicalPartition: MultiCRUDTestDataSet = { + dbName: "db1", + partitinKeyDef: { + paths: ["/key", "/key2"], + version: PartitionKeyDefinitionVersion.V2, + kind: PartitionKeyKind.MultiHash, + }, + containerDef: { + id: "col1", + }, + documents: [ + { id: "document1" }, + { id: "document2", key: null, key2: null, prop: 1 }, + { id: "document3", key: false, key2: false, prop: 1 }, + { id: "document4", key: true, key2: true, prop: 1 }, + { id: "document5", key: 1, key2: 1, prop: 1 }, + { id: "document6", key: "A", key2: "A", prop: 1 }, + { id: "document7", key: "", key2: "", prop: 1 }, + ], + containerRequestOps: { + offerThroughput: 12000, + }, + singleDocFetchQuery: "SELECT * FROM root r WHERE r.key=@key and r.key2=@key2", + parameterGenerator: (doc: any) => { + return [ { - operationType: BulkOperationType.Delete, - id: deleteItemId, - partitionKey: "A", + name: "@key", + value: doc["key"], }, { - operationType: BulkOperationType.Replace, - partitionKey: 5, - id: replaceItemId, - resourceBody: { id: replaceItemId, name: "nice", key: 5 }, + name: "@key2", + value: doc["key2"], }, ]; - const response = await container.items.bulk(operations); - // Create - assert.equal(response[0].resourceBody.name, "sample"); - assert.equal(response[0].statusCode, 201); - // Upsert - assert.equal(response[1].resourceBody.name, "other"); - assert.equal(response[1].statusCode, 201); - // Read - assert.equal(response[2].resourceBody.class, "2010"); - assert.equal(response[2].statusCode, 200); - // Delete - assert.equal(response[3].statusCode, 204); - // Replace - assert.equal(response[4].resourceBody.name, "nice"); - assert.equal(response[4].statusCode, 200); - }); - }); - describe("with v2 container", function () { - let v2Container: Container; - let readItemId: string; - let replaceItemId: string; - let patchItemId: string; - let deleteItemId: string; - before(async function () { - const client = new CosmosClient({ key: masterKey, endpoint }); - const db = await client.databases.createIfNotExists({ id: "patchDb" }); - const database = db.database; - const response = await database.containers.createIfNotExists({ - id: "patchContainer", - partitionKey: { - paths: ["/key"], - version: 2, - }, - throughput: 25100, - }); - v2Container = response.container; - readItemId = addEntropy("item1"); - await v2Container.items.create({ - id: readItemId, - key: true, - class: "2010", + }, + }; + + describe("V1 Container", async () => { + describe("Single Partition Container", async () => { + it("Should do document CRUD operations successfully : container with default partition key", async function () { + await CRUDTestRunner(dataSetForDefaultPartitionKey, false); }); - deleteItemId = addEntropy("item2"); - await v2Container.items.create({ - id: deleteItemId, - key: {}, - class: "2011", + }); + 
describe("Multi Partition Container", async () => { + it("Should do document CRUD operations successfully with upsert : container with default partition key", async function () { + await CRUDTestRunner(dataSetForDefaultPartitionKey, true); }); - replaceItemId = addEntropy("item3"); - await v2Container.items.create({ - id: replaceItemId, - key: 5, - class: "2012", + }); + }); + + describe("V2 Container", async () => { + describe("Multi Partition Container", async () => { + it("Should do document CRUD operations successfully : container with hierarchical partition key", async function () { + await CRUDTestRunner(dataSetForHierarchicalPartitionKey, false); }); - patchItemId = addEntropy("item4"); - await v2Container.items.create({ - id: patchItemId, - key: 5, - class: "2019", + it("Should do document CRUD operations successfully with upsert : container with hierarchical partition key", async function () { + await CRUDTestRunner(dataSetForHierarchicalPartitionKey, true); }); }); - it("handles create, upsert, patch, replace, delete", async function () { - const operations = [ - { - operationType: BulkOperationType.Create, - partitionKey: "A", - resourceBody: { id: addEntropy("doc1"), name: "sample", key: "A" }, - }, - { - operationType: BulkOperationType.Upsert, - partitionKey: "U", - resourceBody: { name: "other", key: "U" }, - }, - { - operationType: BulkOperationType.Read, + }); + + it("Document CRUD over multiple partition: Single partition key", async function () { + await multipelPartitionCRUDTest(multiCrudDataset1); + }); + + it("Document CRUD over multiple partition : Hierarchical partitions", async function () { + await multipelPartitionCRUDTest(multiCrudDatasetWithHierarchicalPartition); + }); + + it("Should auto generate an id for a collection partitioned on id", async function () { + // https://github.com/Azure/azure-sdk-for-js/issues/9734 + const container = await getTestContainer("db1", undefined, { partitionKey: "/id" }); + const { resource } = await container.items.create({}); + assert.ok(resource.id); + }); +}); +// TODO: Non-deterministic test. 
We can't guarantee we see any response with a 429 status code since the retries happen within the response +describe("item read retries", async function () { + it("retries on 429", async function () { + const client = new CosmosClient({ key: masterKey, endpoint }); + const { resource: db } = await client.databases.create({ + id: `small db ${Math.random() * 1000}`, + }); + const containerResponse = await client + .database(db.id) + .containers.create({ id: `small container ${Math.random() * 1000}`, throughput: 400 }); + const container = containerResponse.container; + await container.items.create({ id: "readme" }); + const arr = new Array(400); + const promises = []; + for (let i = 0; i < arr.length; i++) { + promises.push(container.item("readme").read()); + } + const resp = await Promise.all(promises); + assert.equal(resp[0].statusCode, 200); + }); +}); + +describe("bulk/batch item operations", async function () { + describe("test bulk operations", async function () { + describe("v1 multi partition container", async function () { + let container: Container; + let readItemId: string; + let replaceItemId: string; + let deleteItemId: string; + before(async function () { + container = await getTestContainer("bulk container", undefined, { + partitionKey: { + paths: ["/key"], + version: undefined, + }, + throughput: 25100, + }); + readItemId = addEntropy("item1"); + await container.items.create({ id: readItemId, - partitionKey: true, - }, - { - operationType: BulkOperationType.Delete, + key: "A", + class: "2010", + }); + deleteItemId = addEntropy("item2"); + await container.items.create({ id: deleteItemId, - partitionKey: {}, - }, - { - operationType: BulkOperationType.Replace, + key: "A", + class: "2010", + }); + replaceItemId = addEntropy("item3"); + await container.items.create({ id: replaceItemId, - resourceBody: { id: replaceItemId, name: "nice", key: 5 }, - }, - { - operationType: BulkOperationType.Patch, - partitionKey: 5, - id: patchItemId, - resourceBody: { - operations: [{ op: PatchOperationType.add, path: "/great", value: "goodValue" }], - }, - }, - { - operationType: BulkOperationType.Patch, - partitionKey: 5, - id: patchItemId, - resourceBody: { - operations: [{ op: PatchOperationType.add, path: "/good", value: "greatValue" }], - condition: "from c where NOT IS_DEFINED(c.newImproved)", - }, - }, - ]; - const response = await v2Container.items.bulk(operations); - // Create - assert.strictEqual(response[0].resourceBody.name, "sample"); - assert.strictEqual(response[0].statusCode, 201); - // Upsert - assert.strictEqual(response[1].resourceBody.name, "other"); - assert.strictEqual(response[1].statusCode, 201); - // Read - assert.strictEqual(response[2].resourceBody.class, "2010"); - assert.strictEqual(response[2].statusCode, 200); - // Delete - assert.strictEqual(response[3].statusCode, 204); - // Replace - assert.strictEqual(response[4].resourceBody.name, "nice"); - assert.strictEqual(response[4].statusCode, 200); - // Patch - assert.strictEqual(response[5].resourceBody.great, "goodValue"); - assert.strictEqual(response[5].statusCode, 200); - }); - it("respects order", async function () { - readItemId = addEntropy("item1"); - await v2Container.items.create({ - id: readItemId, - key: "A", - class: "2010", + key: 5, + class: "2010", + }); }); - const operations = [ - { - operationType: BulkOperationType.Delete, - id: readItemId, - partitionKey: "A", - }, - { - operationType: BulkOperationType.Read, - id: readItemId, - partitionKey: "A", - }, - ]; - const response = await 
v2Container.items.bulk(operations); - assert.equal(response[0].statusCode, 204); - // Delete occurs first, so the read returns a 404 - assert.equal(response[1].statusCode, 404); - }); - it("424 errors for operations after an error", async function () { - const operations = [ - { - operationType: BulkOperationType.Create, - resourceBody: { - ttl: -10, - key: "A", + after(async () => { + await container.database.delete(); + }); + it("handles create, upsert, replace, delete", async function () { + const operations = [ + { + operationType: BulkOperationType.Create, + resourceBody: { id: addEntropy("doc1"), name: "sample", key: "A" }, }, - }, - { - operationType: BulkOperationType.Create, - resourceBody: { - key: "A", - licenseType: "B", - id: "o239uroihndsf", + { + operationType: BulkOperationType.Upsert, + partitionKey: "A", + resourceBody: { id: addEntropy("doc2"), name: "other", key: "A" }, }, - }, - ]; - const response = await v2Container.items.bulk(operations); - assert.equal(response[1].statusCode, 424); - }); - it("Continues after errors with continueOnError true", async function () { - const operations = [ - { - operationType: BulkOperationType.Create, - resourceBody: { - ttl: -10, - key: "A", + { + operationType: BulkOperationType.Read, + id: readItemId, + partitionKey: "A", }, - }, - { - operationType: BulkOperationType.Create, - resourceBody: { - key: "A", - licenseType: "B", - id: addEntropy("sifjsiof"), + { + operationType: BulkOperationType.Delete, + id: deleteItemId, + partitionKey: "A", }, - }, - ]; - const response = await v2Container.items.bulk(operations, { continueOnError: true }); - assert.strictEqual(response[1].statusCode, 201); - }); - it("autogenerates IDs for Create operations", async function () { - const operations = [ - { - operationType: BulkOperationType.Create, - resourceBody: { - key: "A", - licenseType: "C", + { + operationType: BulkOperationType.Replace, + partitionKey: 5, + id: replaceItemId, + resourceBody: { id: replaceItemId, name: "nice", key: 5 }, }, - }, - ]; - const response = await v2Container.items.bulk(operations); - assert.equal(response[0].statusCode, 201); - }); - it("handles operations with null, undefined, and 0 partition keys", async function () { - const item1Id = addEntropy("item1"); - const item2Id = addEntropy("item2"); - const item3Id = addEntropy("item2"); - await v2Container.items.create({ - id: item1Id, - key: null, - class: "2010", - }); - await v2Container.items.create({ - id: item2Id, - key: 0, - }); - await v2Container.items.create({ - id: item3Id, - key: undefined, - }); - const operations: OperationInput[] = [ - { - operationType: BulkOperationType.Read, - id: item1Id, - partitionKey: null, - }, - { - operationType: BulkOperationType.Read, - id: item2Id, - partitionKey: 0, - }, - { - operationType: BulkOperationType.Read, - id: item3Id, - partitionKey: undefined, - }, - ]; - const response = await v2Container.items.bulk(operations); - assert.equal(response[0].statusCode, 200); - assert.equal(response[1].statusCode, 200); - assert.equal(response[2].statusCode, 200); - }); - }); - describe("v2 single partition container", async function () { - let container: Container; - let deleteItemId: string; - before(async function () { - container = await getTestContainer("bulk container"); - deleteItemId = addEntropy("item2"); - await container.items.create({ - id: deleteItemId, - key: "A", - class: "2010", - }); - }); - it("deletes an item with default partition", async function () { - const operation: OperationInput = { - operationType: 
BulkOperationType.Delete, - id: deleteItemId, - }; - - const deleteResponse = await container.items.bulk([operation]); - assert.equal(deleteResponse[0].statusCode, 204); - }); - }); - describe("v2 multi partition container", async function () { - let container: Container; - let createItemId: string; - let upsertItemId: string; - before(async function () { - container = await getTestContainer("bulk container", undefined, { - partitionKey: { - paths: ["/nested/key"], - version: 2, - }, - throughput: 25100, + ]; + const response = await container.items.bulk(operations); + // Create + assert.equal(response[0].resourceBody.name, "sample"); + assert.equal(response[0].statusCode, 201); + // Upsert + assert.equal(response[1].resourceBody.name, "other"); + assert.equal(response[1].statusCode, 201); + // Read + assert.equal(response[2].resourceBody.class, "2010"); + assert.equal(response[2].statusCode, 200); + // Delete + assert.equal(response[3].statusCode, 204); + // Replace + assert.equal(response[4].resourceBody.name, "nice"); + assert.equal(response[4].statusCode, 200); }); - createItemId = addEntropy("createItem"); - upsertItemId = addEntropy("upsertItem"); }); - it("creates an item with nested object partition key", async function () { - const operations: OperationInput[] = [ - { - operationType: BulkOperationType.Create, - resourceBody: { - id: createItemId, - nested: { - key: "A", - }, + describe("v2 container", function () { + describe("multi partition container", async function () { + let readItemId: string; + let replaceItemId: string; + let patchItemId: string; + let deleteItemId: string; + type BulkTestItem = { + id: string; + key: any; + key2?: any; + key3?: any; + class?: string; + }; + type BulkTestDataSet = { + dbName: string; + containerRequest: ContainerRequest; + documentToCreate: BulkTestItem[]; + bulkOperationOptions: BulkOptions; + operations: { + description?: string; + operation: OperationInput; + expectedOutput?: { + description?: string; + statusCode: number; + propertysToMatch: { + name: string; + value: any; + }[]; + }; + }[]; + }; + const defaultBulkTestDataSet: BulkTestDataSet = { + dbName: "bulkTestDB", + bulkOperationOptions: { + continueOnError: false, }, - }, - { - operationType: BulkOperationType.Upsert, - resourceBody: { - id: upsertItemId, - nested: { - key: false, + containerRequest: { + id: "patchContainer", + partitionKey: { + paths: ["/key"], + version: 2, }, + throughput: 25100, }, - }, - ]; - - const createResponse = await container.items.bulk(operations); - assert.equal(createResponse[0].statusCode, 201); - }); - }); + documentToCreate: [], + operations: [], + }; + async function runBulkTestDataSet(dataset: BulkTestDataSet) { + const client = new CosmosClient({ key: masterKey, endpoint }); + const db = await client.databases.createIfNotExists({ id: dataset.dbName }); + const database = db.database; + const { container } = await database.containers.createIfNotExists( + dataset.containerRequest + ); + try { + for (const doc of dataset.documentToCreate) { + await container.items.create(doc); + } + const response = await container.items.bulk( + dataset.operations.map((value) => value.operation), + dataset.bulkOperationOptions + ); + dataset.operations.forEach(({ description, expectedOutput }, index) => { + if (expectedOutput) { + assert.strictEqual( + response[index].statusCode, + expectedOutput.statusCode, + `Failed during - ${description}` + ); + expectedOutput.propertysToMatch.forEach(({ name, value }) => { + assert.strictEqual( + 
response[index].resourceBody[name], + value, + `Failed during - ${description}` + ); + }); + } + }); + } finally { + await database.delete(); + } + } + function createBulkOperation( + operationType: any, + partitionKeySpecifier?: { partitionKey?: PartitionKey }, + resourceBody?: any, + id?: string + ): OperationInput { + let op: OperationInput = { + operationType, + ...partitionKeySpecifier, + } as OperationInput; + if (resourceBody !== undefined) op = { ...op, resourceBody }; + if (id !== undefined) op = { ...op, id } as any; + return op; + } + function creatreBulkOperationExpectedOutput( + statusCode: number, + propertysToMatch: { name: string; value: any }[] + ): { + statusCode: number; + propertysToMatch: { + name: string; + value: any; + }[]; + } { + return { + statusCode, + propertysToMatch, + }; + } + describe("handles create, upsert, patch, replace, delete", async function () { + it("Hierarchical Partitions with two keys", async function () { + readItemId = addEntropy("item1"); + const createItemWithBooleanPartitionKeyId = addEntropy( + "createItemWithBooleanPartitionKeyId" + ); + const createItemWithStringPartitionKeyId = addEntropy( + "createItemWithStringPartitionKeyId" + ); + const createItemWithUnknownPartitionKeyId = addEntropy( + "createItemWithUnknownPartitionKeyId" + ); + const createItemWithNumberPartitionKeyId = addEntropy( + "createItemWithNumberPartitionKeyId" + ); + replaceItemId = addEntropy("item3"); + patchItemId = addEntropy("item4"); + deleteItemId = addEntropy("item2"); + const dataset: BulkTestDataSet = { + dbName: "hierarchical partition bulk", + containerRequest: { + id: "patchContainer", + partitionKey: { + paths: ["/key", "/key2"], + version: PartitionKeyDefinitionVersion.V2, + kind: PartitionKeyKind.MultiHash, + }, + throughput: 25100, + }, + bulkOperationOptions: { + continueOnError: false, + }, + documentToCreate: [ + { id: readItemId, key: true, key2: true, class: "2010" }, + { id: createItemWithBooleanPartitionKeyId, key: true, key2: false, class: "2010" }, + { id: createItemWithUnknownPartitionKeyId, key: {}, key2: {}, class: "2010" }, + { id: createItemWithNumberPartitionKeyId, key: 0, key2: 3, class: "2010" }, + { id: createItemWithStringPartitionKeyId, key: 5, key2: {}, class: "2010" }, + { id: deleteItemId, key: {}, key2: {}, class: "2011" }, + { id: replaceItemId, key: 5, key2: 5, class: "2012" }, + { id: patchItemId, key: 5, key2: 5, class: "2019" }, + ], + operations: [ + { + description: "Read document with partitionKey containing boolean values.", + operation: createBulkOperation( + BulkOperationType.Read, + { partitionKey: [true, false] }, + undefined, + createItemWithBooleanPartitionKeyId + ), + expectedOutput: creatreBulkOperationExpectedOutput(200, [ + { name: "class", value: "2010" }, + ]), + }, + { + description: "Read document with partitionKey containing unknown values.", + operation: createBulkOperation( + BulkOperationType.Read, + { partitionKey: [{}, {}] }, + undefined, + createItemWithUnknownPartitionKeyId + ), + expectedOutput: creatreBulkOperationExpectedOutput(200, [ + { name: "class", value: "2010" }, + ]), + }, + { + description: + "Create operation with partitionKey set to undefined should fail since internally it would map to [{},{}].", + operation: createBulkOperation( + BulkOperationType.Create, + { partitionKey: undefined }, + { id: addEntropy("doc10"), name: "sample", key: "A", key2: "B" } + ), + expectedOutput: creatreBulkOperationExpectedOutput(400, []), + }, + { + description: "Read document with partitionKey
containing Number values.", + operation: createBulkOperation( + BulkOperationType.Read, + { partitionKey: [0, 3] }, + undefined, + createItemWithNumberPartitionKeyId + ), + expectedOutput: creatreBulkOperationExpectedOutput(200, [ + { name: "class", value: "2010" }, + ]), + }, + { + description: "Creating document with partitionKey containing 2 strings.", + operation: createBulkOperation( + BulkOperationType.Create, + { partitionKey: ["A", "B"] }, + { id: addEntropy("doc1"), name: "sample", key: "A", key2: "B" } + ), + expectedOutput: creatreBulkOperationExpectedOutput(201, [ + { name: "name", value: "sample" }, + ]), + }, + { + description: "Creating document with mismatching partition key.", + operation: createBulkOperation( + BulkOperationType.Create, + { partitionKey: ["A", "V"] }, + { id: addEntropy("doc1"), name: "sample", key: "A", key2: "B" } + ), + expectedOutput: creatreBulkOperationExpectedOutput(400, []), + }, + { + description: "Upsert document with partitionKey containing 2 strings.", + operation: createBulkOperation( + BulkOperationType.Upsert, + { partitionKey: ["U", "V"] }, + { name: "other", key: "U", key2: "V" } + ), + expectedOutput: creatreBulkOperationExpectedOutput(201, [ + { name: "name", value: "other" }, + ]), + }, + { + description: "Read document with partitionKey containing 2 booleans.", + operation: createBulkOperation( + BulkOperationType.Read, + { partitionKey: [true, true] }, + undefined, + readItemId + ), + expectedOutput: creatreBulkOperationExpectedOutput(200, [ + { name: "class", value: "2010" }, + ]), + }, + { + description: + "Delete document with partitionKey containing 2 undefined partition keys.", + operation: createBulkOperation( + BulkOperationType.Delete, + { partitionKey: [{}, {}] }, + undefined, + deleteItemId + ), + expectedOutput: creatreBulkOperationExpectedOutput(204, []), + }, + { + description: "Replace document without specifying partition key.", + operation: createBulkOperation( + BulkOperationType.Replace, + {}, + { id: replaceItemId, name: "nice", key: 5, key2: 5 }, + replaceItemId + ), + expectedOutput: creatreBulkOperationExpectedOutput(200, [ + { name: "name", value: "nice" }, + ]), + }, + { + description: "Patch document with partitionKey containing 2 Numbers.", + operation: createBulkOperation( + BulkOperationType.Patch, + { partitionKey: [5, 5] }, + { + operations: [ + { op: PatchOperationType.add, path: "/great", value: "goodValue" }, + ], + }, + patchItemId + ), + expectedOutput: creatreBulkOperationExpectedOutput(200, [ + { name: "great", value: "goodValue" }, + ]), + }, + { + description: "Conditional Patch document with partitionKey containing 2 Numbers.", + operation: createBulkOperation( + BulkOperationType.Patch, + { partitionKey: [5, 5] }, + { + operations: [ + { op: PatchOperationType.add, path: "/good", value: "greatValue" }, + ], + condition: "from c where NOT IS_DEFINED(c.newImproved)", + }, + patchItemId + ), + expectedOutput: creatreBulkOperationExpectedOutput(200, []), + }, + ], + }; + await runBulkTestDataSet(dataset); + }); + it("Hierarchical Partitions with three keys", async function () { + readItemId = addEntropy("item1"); + const createItemWithBooleanPartitionKeyId = addEntropy( + "createItemWithBooleanPartitionKeyId" + ); + const createItemWithStringPartitionKeyId = addEntropy( + "createItemWithStringPartitionKeyId" + ); + const createItemWithUnknownPartitionKeyId = addEntropy( + "createItemWithUnknownPartitionKeyId" + ); + const createItemWithNumberPartitionKeyId = addEntropy( + 
"createItemWithNumberPartitionKeyId" + ); + replaceItemId = addEntropy("item3"); + patchItemId = addEntropy("item4"); + deleteItemId = addEntropy("item2"); + const dataset: BulkTestDataSet = { + dbName: "hierarchical partition bulk", + containerRequest: { + id: "patchContainer", + partitionKey: { + paths: ["/key", "/key2", "/key3"], + version: PartitionKeyDefinitionVersion.V2, + kind: PartitionKeyKind.MultiHash, + }, + throughput: 25100, + }, + documentToCreate: [ + { id: readItemId, key: true, key2: true, key3: true, class: "2010" }, + { + id: createItemWithBooleanPartitionKeyId, + key: true, + key2: false, + key3: true, + class: "2010", + }, + { + id: createItemWithUnknownPartitionKeyId, + key: {}, + key2: {}, + key3: {}, + class: "2010", + }, + { id: createItemWithNumberPartitionKeyId, key: 0, key2: 3, key3: 5, class: "2010" }, + { + id: createItemWithStringPartitionKeyId, + key: 5, + key2: {}, + key3: "adsf", + class: "2010", + }, + { id: deleteItemId, key: {}, key2: {}, key3: {}, class: "2011" }, + { id: replaceItemId, key: 5, key2: 5, key3: "T", class: "2012" }, + { id: patchItemId, key: 5, key2: 5, key3: true, class: "2019" }, + ], + bulkOperationOptions: {}, + operations: [ + { + description: "Read document with partitionKey containing booleans values.", + operation: createBulkOperation( + BulkOperationType.Read, + { partitionKey: [true, false, true] }, + undefined, + createItemWithBooleanPartitionKeyId + ), + expectedOutput: creatreBulkOperationExpectedOutput(200, [ + { name: "class", value: "2010" }, + ]), + }, + { + description: "Read document with partitionKey containing unknown values.", + operation: createBulkOperation( + BulkOperationType.Read, + { partitionKey: [{}, {}, {}] }, + undefined, + createItemWithUnknownPartitionKeyId + ), + expectedOutput: creatreBulkOperationExpectedOutput(200, [ + { name: "class", value: "2010" }, + ]), + }, + { + description: "Read document with partitionKey containing Number values.", + operation: createBulkOperation( + BulkOperationType.Read, + { partitionKey: [0, 3, 5] }, + undefined, + createItemWithNumberPartitionKeyId + ), + expectedOutput: creatreBulkOperationExpectedOutput(200, [ + { name: "class", value: "2010" }, + ]), + }, + { + description: "Creating document with partitionKey containing 2 strings.", + operation: createBulkOperation( + BulkOperationType.Create, + { partitionKey: ["A", "B", "C"] }, + { id: addEntropy("doc1"), name: "sample", key: "A", key2: "B", key3: "C" } + ), + expectedOutput: creatreBulkOperationExpectedOutput(201, [ + { name: "name", value: "sample" }, + ]), + }, + { + description: "Creating document with mismatching partition key.", + operation: createBulkOperation( + BulkOperationType.Create, + { partitionKey: ["A", "V", true] }, + { id: addEntropy("doc1"), name: "sample", key: "A", key2: "B", key3: true } + ), + expectedOutput: creatreBulkOperationExpectedOutput(400, []), + }, + { + description: "Upsert document with partitionKey containing 2 strings.", + operation: createBulkOperation( + BulkOperationType.Upsert, + { partitionKey: ["U", "V", 5] }, + { name: "other", key: "U", key2: "V", key3: 5 } + ), + expectedOutput: creatreBulkOperationExpectedOutput(201, [ + { name: "name", value: "other" }, + ]), + }, + { + description: "Read document with partitionKey containing 2 booleans.", + operation: createBulkOperation( + BulkOperationType.Read, + { partitionKey: [true, true, true] }, + undefined, + readItemId + ), + expectedOutput: creatreBulkOperationExpectedOutput(200, [ + { name: "class", value: "2010" }, + 
]), + }, + { + description: + "Delete document with partitionKey containing 3 undefined partition keys.", + operation: createBulkOperation( + BulkOperationType.Delete, + { partitionKey: [{}, {}, {}] }, + undefined, + deleteItemId + ), + expectedOutput: creatreBulkOperationExpectedOutput(204, []), + }, + { + description: "Replace document without specifying partition key.", + operation: createBulkOperation( + BulkOperationType.Replace, + {}, + { id: replaceItemId, name: "nice", key: 5, key2: 5, key3: "T" }, + replaceItemId + ), + expectedOutput: creatreBulkOperationExpectedOutput(200, [ + { name: "name", value: "nice" }, + ]), + }, + { + description: "Patch document with partitionKey containing 2 numbers and a boolean.", + operation: createBulkOperation( + BulkOperationType.Patch, + { partitionKey: [5, 5, true] }, + { + operations: [ + { op: PatchOperationType.add, path: "/great", value: "goodValue" }, + ], + }, + patchItemId + ), + expectedOutput: creatreBulkOperationExpectedOutput(200, [ + { name: "great", value: "goodValue" }, + ]), + }, + { + description: + "Conditional Patch document with partitionKey containing 2 numbers and a boolean.", + operation: createBulkOperation( + BulkOperationType.Patch, + { partitionKey: [5, 5, true] }, + { + operations: [ + { op: PatchOperationType.add, path: "/good", value: "greatValue" }, + ], + condition: "from c where NOT IS_DEFINED(c.newImproved)", + }, + patchItemId + ), + expectedOutput: creatreBulkOperationExpectedOutput(200, []), + }, + ], + }; + await runBulkTestDataSet(dataset); + }); + }); + it("respects order", async function () { + readItemId = addEntropy("item1"); + const dataset: BulkTestDataSet = { + ...defaultBulkTestDataSet, + documentToCreate: [{ id: readItemId, key: "A", class: "2010" }], + operations: [ + { + description: "Delete for an existing item should succeed.", + operation: createBulkOperation( + BulkOperationType.Delete, + { partitionKey: "A" }, + undefined, + readItemId + ), + expectedOutput: creatreBulkOperationExpectedOutput(204, []), + }, + { + description: "Delete occurs first, so the read returns a 404.", + operation: createBulkOperation( + BulkOperationType.Read, + { partitionKey: "A" }, + undefined, + readItemId + ), + expectedOutput: creatreBulkOperationExpectedOutput(404, []), + }, + ], + }; + await runBulkTestDataSet(dataset); + }); + it("424 errors for operations after an error", async function () { + const dataset: BulkTestDataSet = { + ...defaultBulkTestDataSet, + documentToCreate: [], + operations: [ + { + description: "Operation should fail with invalid ttl.", + operation: createBulkOperation( + BulkOperationType.Create, + {}, + { ttl: -10, key: "A" } + ), + expectedOutput: creatreBulkOperationExpectedOutput(400, []), + }, + { + description: + "Operation should be aborted with 424 because the previous operation failed.", + operation: createBulkOperation( + BulkOperationType.Create, + { partitionKey: "A" }, + { key: "A", licenseType: "B", id: "o239uroihndsf" } + ), + expectedOutput: creatreBulkOperationExpectedOutput(424, []), + }, + ], + }; + await runBulkTestDataSet(dataset); + }); + it("Continues after errors with continueOnError true", async function () { + const dataset: BulkTestDataSet = { + ...defaultBulkTestDataSet, + documentToCreate: [], + bulkOperationOptions: { + continueOnError: true, + }, + operations: [ + { + description: "Operation should fail with invalid ttl.", + operation: createBulkOperation( + BulkOperationType.Create, + {}, + { ttl: -10, key: "A" } + ), + expectedOutput: creatreBulkOperationExpectedOutput(400, []), + }, + { + description: + "Operation should succeed and should not be abandoned because of previous failure, since continueOnError is true.",
+ operation: createBulkOperation( + BulkOperationType.Create, + {}, + { key: "A", licenseType: "B", id: addEntropy("sifjsiof") } + ), + expectedOutput: creatreBulkOperationExpectedOutput(201, []), + }, + ], + }; + await runBulkTestDataSet(dataset); + }); + it("autogenerates IDs for Create operations", async function () { + const dataset: BulkTestDataSet = { + ...defaultBulkTestDataSet, + operations: [ + { + description: "Create operation without an id should succeed and autogenerate an id.", + operation: createBulkOperation( + BulkOperationType.Create, + {}, + { key: "A", licenseType: "C" } + ), + expectedOutput: creatreBulkOperationExpectedOutput(201, []), + }, + ], + }; + await runBulkTestDataSet(dataset); + }); + it("handles operations with null, undefined, and 0 partition keys", async function () { + const item1Id = addEntropy("item1"); + const item2Id = addEntropy("item2"); + const item3Id = addEntropy("item3"); + const dataset: BulkTestDataSet = { + ...defaultBulkTestDataSet, + documentToCreate: [ + { id: item1Id, key: null, class: "2010" }, + { id: item2Id, key: 0 }, + { id: item3Id, key: undefined }, + ], + operations: [ + { + description: "Read document with null partition key should succeed.", + operation: createBulkOperation( + BulkOperationType.Read, + { partitionKey: null }, + undefined, + item1Id + ), + expectedOutput: creatreBulkOperationExpectedOutput(200, []), + }, + { + description: "Read document with 0 partition key should succeed.", + operation: createBulkOperation( + BulkOperationType.Read, + { partitionKey: 0 }, + undefined, + item2Id + ), + expectedOutput: creatreBulkOperationExpectedOutput(200, []), + }, + { + description: "Read document with undefined partition key should succeed.", + operation: createBulkOperation( + BulkOperationType.Read, + { partitionKey: undefined }, + undefined, + item3Id + ), + expectedOutput: creatreBulkOperationExpectedOutput(200, []), + }, + ], + }; + await runBulkTestDataSet(dataset); - // TODO: Non-deterministic test. We can't guarantee we see any response with a 429 status code since the retries happen within the response
-describe("item read retries", async function () { - it("retries on 429", async function () { - const client = new CosmosClient({ key: masterKey, endpoint }); - const { resource: db } = await client.databases.create({ - id: `small db ${Math.random() * 1000}`, + }); }); - const containerResponse = await client - .database(db.id) - .containers.create({ id: `small container ${Math.random() * 1000}`, throughput: 400 }); - const container = containerResponse.container; - await container.items.create({ id: "readme" }); - const arr = new Array(400); - const promises = []; - for (let i = 0; i < arr.length; i++) { - promises.push(container.item("readme").read()); - } - const resp = await Promise.all(promises); - assert.equal(resp[0].statusCode, 200); - }); - }); + describe("multi partition container - nested partition key", async function () { + let container: Container; + let createItemId: string; + let upsertItemId: string; + before(async function () { + container = await getTestContainer("bulk container", undefined, { + partitionKey: { + paths: ["/nested/key"], + version: 2, + }, + throughput: 25100, + }); + createItemId = addEntropy("createItem"); + upsertItemId = addEntropy("upsertItem"); + }); + it("creates an item with nested object partition key", async function () { + const operations: OperationInput[] = [ + { + operationType: BulkOperationType.Create, + resourceBody: { + id: createItemId, + nested: { + key: "A", + }, + }, + }, + { + operationType: BulkOperationType.Upsert, + resourceBody: { + id: upsertItemId, + nested: { + key: false, + }, + }, + }, + ]; - describe("v2 single partition container", async function () { - let container: Container; - let createItemId: string; - let otherItemId: string; - let upsertItemId: string; - let replaceItemId: string; - let deleteItemId: string; - let patchItemId: string; - before(async function () { - const client = new CosmosClient({ key: masterKey, endpoint }); - const db = await client.databases.createIfNotExists({ id: "patchDb" }); - const contResponse = await db.database.containers.createIfNotExists({ - id: "patchContainer", - partitionKey: { - paths: ["/key"], - version: 2, - }, - throughput: 25100, + const createResponse = await container.items.bulk(operations); + assert.equal(createResponse[0].statusCode, 201); + }); }); - container = contResponse.container; - deleteItemId = addEntropy("item1"); - createItemId = addEntropy("item2"); - otherItemId = addEntropy("item2"); - upsertItemId = addEntropy("item4"); - replaceItemId = addEntropy("item3"); - patchItemId = addEntropy("item5"); - await container.items.create({ - id:
deleteItemId, - key: "A", - class: "2010", - }); - await container.items.create({ - id: replaceItemId, - key: "A", - class: "2010", - }); - await container.items.create({ - id: patchItemId, - key: "A", - class: "2010", + describe("single partition container", async function () { + let container: Container; + let deleteItemId: string; + before(async function () { + container = await getTestContainer("bulk container"); + deleteItemId = addEntropy("item2"); + await container.items.create({ + id: deleteItemId, + key: "A", + class: "2010", + }); + }); + it("deletes an item with default partition", async function () { + const operation: OperationInput = { + operationType: BulkOperationType.Delete, + id: deleteItemId, + }; + + const deleteResponse = await container.items.bulk([operation]); + assert.equal(deleteResponse[0].statusCode, 204); + }); }); }); - it("can batch all operation types", async function () { - const operations: OperationInput[] = [ - { - operationType: BulkOperationType.Create, - resourceBody: { id: createItemId, key: "A", school: "high" }, - }, - { - operationType: BulkOperationType.Upsert, - resourceBody: { id: upsertItemId, key: "A", school: "elementary" }, - }, - { - operationType: BulkOperationType.Replace, - id: replaceItemId, - resourceBody: { id: replaceItemId, key: "A", school: "junior high" }, - }, - { - operationType: BulkOperationType.Delete, + }); + describe("test batch operations", function () { + describe("v2 multi partition container", async function () { + let container: Container; + let createItemId: string; + let otherItemId: string; + let upsertItemId: string; + let replaceItemId: string; + let deleteItemId: string; + let patchItemId: string; + before(async function () { + const client = new CosmosClient({ key: masterKey, endpoint }); + const db = await client.databases.createIfNotExists({ id: "patchDb" }); + const contResponse = await db.database.containers.createIfNotExists({ + id: "patchContainer", + partitionKey: { + paths: ["/key"], + version: 2, + }, + throughput: 25100, + }); + container = contResponse.container; + deleteItemId = addEntropy("item1"); + createItemId = addEntropy("item2"); + otherItemId = addEntropy("item2"); + upsertItemId = addEntropy("item4"); + replaceItemId = addEntropy("item3"); + patchItemId = addEntropy("item5"); + await container.items.create({ id: deleteItemId, - }, - { - operationType: BulkOperationType.Patch, + key: "A", + class: "2010", + }); + await container.items.create({ + id: replaceItemId, + key: "A", + class: "2010", + }); + await container.items.create({ id: patchItemId, - resourceBody: { - operations: [{ op: PatchOperationType.add, path: "/good", value: "greatValue" }], - condition: "from c where NOT IS_DEFINED(c.newImproved)", + key: "A", + class: "2010", + }); + }); + it("can batch all operation types", async function () { + const operations: OperationInput[] = [ + { + operationType: BulkOperationType.Create, + resourceBody: { id: createItemId, key: "A", school: "high" }, }, - }, - ]; + { + operationType: BulkOperationType.Upsert, + resourceBody: { id: upsertItemId, key: "A", school: "elementary" }, + }, + { + operationType: BulkOperationType.Replace, + id: replaceItemId, + resourceBody: { id: replaceItemId, key: "A", school: "junior high" }, + }, + { + operationType: BulkOperationType.Delete, + id: deleteItemId, + }, + { + operationType: BulkOperationType.Patch, + id: patchItemId, + resourceBody: { + operations: [{ op: PatchOperationType.add, path: "/good", value: "greatValue" }], + condition: "from c where NOT 
IS_DEFINED(c.newImproved)", + }, + }, + ]; - const response = await container.items.batch(operations, "A"); - assert(isOperationResponse(response.result[0])); - assert.strictEqual(response.result[0].statusCode, 201); - assert.strictEqual(response.result[1].statusCode, 201); - assert.strictEqual(response.result[2].statusCode, 200); - assert.strictEqual(response.result[3].statusCode, 204); - assert.strictEqual(response.result[4].statusCode, 200); - }); - it("rolls back prior operations when one fails", async function () { - const operations: OperationInput[] = [ - { - operationType: BulkOperationType.Upsert, - resourceBody: { id: otherItemId, key: "A", school: "elementary" }, - }, - { - operationType: BulkOperationType.Delete, - id: deleteItemId + addEntropy("make this 404"), - }, - ]; + const response = await container.items.batch(operations, "A"); + assert(isOperationResponse(response.result[0])); + assert.strictEqual(response.result[0].statusCode, 201); + assert.strictEqual(response.result[1].statusCode, 201); + assert.strictEqual(response.result[2].statusCode, 200); + assert.strictEqual(response.result[3].statusCode, 204); + assert.strictEqual(response.result[4].statusCode, 200); + }); + it("rolls back prior operations when one fails", async function () { + const operations: OperationInput[] = [ + { + operationType: BulkOperationType.Upsert, + resourceBody: { id: otherItemId, key: "A", school: "elementary" }, + }, + { + operationType: BulkOperationType.Delete, + id: deleteItemId + addEntropy("make this 404"), + }, + ]; - const deleteResponse = await container.items.batch(operations, "A"); - assert.strictEqual(deleteResponse.result[0].statusCode, 424); - assert.strictEqual(deleteResponse.result[1].statusCode, 404); - const { resource: readItem } = await container.item(otherItemId).read(); - assert.strictEqual(readItem, undefined); - assert(isOperationResponse(deleteResponse.result[0])); - }); + const deleteResponse = await container.items.batch(operations, "A"); + assert.strictEqual(deleteResponse.result[0].statusCode, 424); + assert.strictEqual(deleteResponse.result[1].statusCode, 404); + const { resource: readItem } = await container.item(otherItemId).read(); + assert.strictEqual(readItem, undefined); + assert(isOperationResponse(deleteResponse.result[0])); + }); - function isOperationResponse(object: unknown): object is OperationResponse { - return ( - typeof object === "object" && - object !== null && - Object.prototype.hasOwnProperty.call(object, "statusCode") && - Object.prototype.hasOwnProperty.call(object, "requestCharge") - ); - } + function isOperationResponse(object: unknown): object is OperationResponse { + return ( + typeof object === "object" && + object !== null && + Object.prototype.hasOwnProperty.call(object, "statusCode") && + Object.prototype.hasOwnProperty.call(object, "requestCharge") + ); + } + }); }); }); + describe("patch operations", function () { describe("various mixed operations", function () { let container: Container; diff --git a/sdk/cosmosdb/cosmos/tsconfig.strict.json b/sdk/cosmosdb/cosmos/tsconfig.strict.json index 30c74085188c..50c610202437 100644 --- a/sdk/cosmosdb/cosmos/tsconfig.strict.json +++ b/sdk/cosmosdb/cosmos/tsconfig.strict.json @@ -125,6 +125,7 @@ "src/routing/index.ts", "src/utils/SasToken.ts", "src/utils/tracing.ts", + "src/utils/hashing", "src/client/SasToken/SasTokenProperties.ts", "src/client/SasToken/PermissionScopeValues.ts", "test/public/common/TestHelpers.ts",