From 3a7a2acd7c700a667b9a50fa31b921a792fa3eb3 Mon Sep 17 00:00:00 2001 From: Heath Stewart Date: Wed, 3 Jun 2020 19:19:54 -0700 Subject: [PATCH 1/5] Rename AnalyzeResult to AnalyzeTextResult Fixes #12486 --- .../Azure.Search.Documents/CHANGELOG.md | 1 + .../Azure.Search.Documents.netstandard2.0.cs | 8 +- .../src/Generated/IndexesRestClient.cs | 6 +- ...cs => AnalyzeTextRequest.Serialization.cs} | 2 +- ...nalyzeRequest.cs => AnalyzeTextRequest.cs} | 19 +--- ...nalyzeRequest.cs => AnalyzeTextRequest.cs} | 17 ++- .../src/Indexes/SearchIndexClient.cs | 16 +-- .../tests/Models/AnalyzeTextRequestTests.cs | 19 ++++ .../tests/SearchIndexClientTests.cs | 19 ++++ .../SearchIndexClientTests/AnalyzeText.json | 106 ++++++++++++++++++ .../AnalyzeTextAsync.json | 106 ++++++++++++++++++ 11 files changed, 286 insertions(+), 33 deletions(-) rename sdk/search/Azure.Search.Documents/src/Generated/Models/{AnalyzeRequest.Serialization.cs => AnalyzeTextRequest.Serialization.cs} (95%) rename sdk/search/Azure.Search.Documents/src/Generated/Models/{AnalyzeRequest.cs => AnalyzeTextRequest.cs} (74%) rename sdk/search/Azure.Search.Documents/src/Indexes/Models/{AnalyzeRequest.cs => AnalyzeTextRequest.cs} (57%) create mode 100644 sdk/search/Azure.Search.Documents/tests/Models/AnalyzeTextRequestTests.cs create mode 100644 sdk/search/Azure.Search.Documents/tests/SessionRecords/SearchIndexClientTests/AnalyzeText.json create mode 100644 sdk/search/Azure.Search.Documents/tests/SessionRecords/SearchIndexClientTests/AnalyzeTextAsync.json diff --git a/sdk/search/Azure.Search.Documents/CHANGELOG.md b/sdk/search/Azure.Search.Documents/CHANGELOG.md index 8bea0751336b6..f675d4bbfe753 100644 --- a/sdk/search/Azure.Search.Documents/CHANGELOG.md +++ b/sdk/search/Azure.Search.Documents/CHANGELOG.md @@ -6,6 +6,7 @@ - Moved models for managing indexes, indexers, and skillsets to `Azure.Search.Documents.Indexes.Models`. 
- Split `SearchServiceClient` into `SearchIndexClient` for managing indexes, and `SearchIndexerClient` for managing indexers, both of which are now in `Azure.Search.Documents.Indexes`. +- Renamed `AnalyzeRequest` to `AnalyzeTextRequest`. - Renamed `SearchIndexerDataSource` to `SearchIndexerDataSourceConnection`. - Renamed methods on `SearchIndexerClient` matching "\*DataSource" to "\*DataSourceConnection". - Made collection- and dictionary-type properties read-only, i.e. has only get-accessors, based on [.NET Guidelines][net-guidelines-collection-properties]. diff --git a/sdk/search/Azure.Search.Documents/api/Azure.Search.Documents.netstandard2.0.cs b/sdk/search/Azure.Search.Documents/api/Azure.Search.Documents.netstandard2.0.cs index 6fd9a6cb48f3b..7ec2d5cf81b54 100644 --- a/sdk/search/Azure.Search.Documents/api/Azure.Search.Documents.netstandard2.0.cs +++ b/sdk/search/Azure.Search.Documents/api/Azure.Search.Documents.netstandard2.0.cs @@ -109,8 +109,8 @@ public SearchIndexClient(System.Uri endpoint, Azure.AzureKeyCredential credentia public SearchIndexClient(System.Uri endpoint, Azure.AzureKeyCredential credential, Azure.Search.Documents.SearchClientOptions options) { } public virtual System.Uri Endpoint { get { throw null; } } public virtual string ServiceName { get { throw null; } } - public virtual Azure.Response> AnalyzeText(string indexName, Azure.Search.Documents.Indexes.Models.AnalyzeRequest analyzeRequest, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } - public virtual System.Threading.Tasks.Task>> AnalyzeTextAsync(string indexName, Azure.Search.Documents.Indexes.Models.AnalyzeRequest analyzeRequest, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.Response> AnalyzeText(string indexName, Azure.Search.Documents.Indexes.Models.AnalyzeTextRequest request, System.Threading.CancellationToken 
cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual System.Threading.Tasks.Task>> AnalyzeTextAsync(string indexName, Azure.Search.Documents.Indexes.Models.AnalyzeTextRequest request, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } public virtual Azure.Response CreateIndex(Azure.Search.Documents.Indexes.Models.SearchIndex index, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } public virtual System.Threading.Tasks.Task> CreateIndexAsync(Azure.Search.Documents.Indexes.Models.SearchIndex index, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } public virtual Azure.Response CreateOrUpdateIndex(Azure.Search.Documents.Indexes.Models.SearchIndex index, bool allowIndexDowntime = false, bool onlyIfUnchanged = false, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } @@ -212,9 +212,9 @@ internal AnalyzedTokenInfo() { } public int StartOffset { get { throw null; } } public string Token { get { throw null; } } } - public partial class AnalyzeRequest + public partial class AnalyzeTextRequest { - public AnalyzeRequest(string text) { } + public AnalyzeTextRequest(string text) { } public Azure.Search.Documents.Indexes.Models.LexicalAnalyzerName? 
Analyzer { get { throw null; } set { } } public System.Collections.Generic.IList CharFilters { get { throw null; } } public string Text { get { throw null; } } diff --git a/sdk/search/Azure.Search.Documents/src/Generated/IndexesRestClient.cs b/sdk/search/Azure.Search.Documents/src/Generated/IndexesRestClient.cs index d043fa369afde..e7cfecd40f0df 100644 --- a/sdk/search/Azure.Search.Documents/src/Generated/IndexesRestClient.cs +++ b/sdk/search/Azure.Search.Documents/src/Generated/IndexesRestClient.cs @@ -584,7 +584,7 @@ public Response GetStatistics(string indexName, Cancellat } } - internal HttpMessage CreateAnalyzeRequest(string indexName, AnalyzeRequest request) + internal HttpMessage CreateAnalyzeRequest(string indexName, AnalyzeTextRequest request) { var message = _pipeline.CreateMessage(); var request0 = message.Request; @@ -612,7 +612,7 @@ internal HttpMessage CreateAnalyzeRequest(string indexName, AnalyzeRequest reque /// The name of the index for which to test an analyzer. /// The text and analyzer or analysis components to test. /// The cancellation token to use. - public async Task> AnalyzeAsync(string indexName, AnalyzeRequest request, CancellationToken cancellationToken = default) + public async Task> AnalyzeAsync(string indexName, AnalyzeTextRequest request, CancellationToken cancellationToken = default) { if (indexName == null) { @@ -650,7 +650,7 @@ public async Task> AnalyzeAsync(string indexName, Analyz /// The name of the index for which to test an analyzer. /// The text and analyzer or analysis components to test. /// The cancellation token to use. 
- public Response Analyze(string indexName, AnalyzeRequest request, CancellationToken cancellationToken = default) + public Response Analyze(string indexName, AnalyzeTextRequest request, CancellationToken cancellationToken = default) { if (indexName == null) { diff --git a/sdk/search/Azure.Search.Documents/src/Generated/Models/AnalyzeRequest.Serialization.cs b/sdk/search/Azure.Search.Documents/src/Generated/Models/AnalyzeTextRequest.Serialization.cs similarity index 95% rename from sdk/search/Azure.Search.Documents/src/Generated/Models/AnalyzeRequest.Serialization.cs rename to sdk/search/Azure.Search.Documents/src/Generated/Models/AnalyzeTextRequest.Serialization.cs index a5943ff2a735c..ecccc0c38ddad 100644 --- a/sdk/search/Azure.Search.Documents/src/Generated/Models/AnalyzeRequest.Serialization.cs +++ b/sdk/search/Azure.Search.Documents/src/Generated/Models/AnalyzeTextRequest.Serialization.cs @@ -11,7 +11,7 @@ namespace Azure.Search.Documents.Indexes.Models { - public partial class AnalyzeRequest : IUtf8JsonSerializable + public partial class AnalyzeTextRequest : IUtf8JsonSerializable { void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) { diff --git a/sdk/search/Azure.Search.Documents/src/Generated/Models/AnalyzeRequest.cs b/sdk/search/Azure.Search.Documents/src/Generated/Models/AnalyzeTextRequest.cs similarity index 74% rename from sdk/search/Azure.Search.Documents/src/Generated/Models/AnalyzeRequest.cs rename to sdk/search/Azure.Search.Documents/src/Generated/Models/AnalyzeTextRequest.cs index 56ad05a5cfa41..966afdad32c14 100644 --- a/sdk/search/Azure.Search.Documents/src/Generated/Models/AnalyzeRequest.cs +++ b/sdk/search/Azure.Search.Documents/src/Generated/Models/AnalyzeTextRequest.cs @@ -11,29 +11,16 @@ namespace Azure.Search.Documents.Indexes.Models { /// Specifies some text and analysis components used to break that text into tokens. 
- public partial class AnalyzeRequest + public partial class AnalyzeTextRequest { - /// Initializes a new instance of AnalyzeRequest. - /// The text to break into tokens. - public AnalyzeRequest(string text) - { - if (text == null) - { - throw new ArgumentNullException(nameof(text)); - } - - Text = text; - TokenFilters = new List(); - CharFilters = new List(); - } - /// Initializes a new instance of AnalyzeRequest. + /// Initializes a new instance of AnalyzeTextRequest. /// The text to break into tokens. /// The name of the analyzer to use to break the given text. If this parameter is not specified, you must specify a tokenizer instead. The tokenizer and analyzer parameters are mutually exclusive. /// The name of the tokenizer to use to break the given text. If this parameter is not specified, you must specify an analyzer instead. The tokenizer and analyzer parameters are mutually exclusive. /// An optional list of token filters to use when breaking the given text. This parameter can only be set when using the tokenizer parameter. /// An optional list of character filters to use when breaking the given text. This parameter can only be set when using the tokenizer parameter. - internal AnalyzeRequest(string text, LexicalAnalyzerName? analyzer, LexicalTokenizerName? tokenizer, IList tokenFilters, IList charFilters) + internal AnalyzeTextRequest(string text, LexicalAnalyzerName? analyzer, LexicalTokenizerName? 
tokenizer, IList tokenFilters, IList charFilters) { Text = text; Analyzer = analyzer; diff --git a/sdk/search/Azure.Search.Documents/src/Indexes/Models/AnalyzeRequest.cs b/sdk/search/Azure.Search.Documents/src/Indexes/Models/AnalyzeTextRequest.cs similarity index 57% rename from sdk/search/Azure.Search.Documents/src/Indexes/Models/AnalyzeRequest.cs rename to sdk/search/Azure.Search.Documents/src/Indexes/Models/AnalyzeTextRequest.cs index 1b9f1bc3eead5..88d338ef33e91 100644 --- a/sdk/search/Azure.Search.Documents/src/Indexes/Models/AnalyzeRequest.cs +++ b/sdk/search/Azure.Search.Documents/src/Indexes/Models/AnalyzeTextRequest.cs @@ -1,13 +1,28 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. +using System; using System.Collections.Generic; using Azure.Core; namespace Azure.Search.Documents.Indexes.Models { - public partial class AnalyzeRequest + [CodeGenModel("AnalyzeRequest")] + public partial class AnalyzeTextRequest { + /// + /// Initializes a new instance of AnalyzeRequest. + /// One of or is also required. + /// + /// Required text to break into tokens. + public AnalyzeTextRequest(string text) + { + Text = text ?? throw new ArgumentNullException(nameof(text)); + + TokenFilters = new List(); + CharFilters = new List(); + } + /// An optional list of token filters to use when breaking the given text. This parameter can only be set when using the tokenizer parameter. [CodeGenMember(EmptyAsUndefined = true, Initialize = true)] public IList TokenFilters { get; } diff --git a/sdk/search/Azure.Search.Documents/src/Indexes/SearchIndexClient.cs b/sdk/search/Azure.Search.Documents/src/Indexes/SearchIndexClient.cs index bbd0ce054faea..d3b27385204cd 100644 --- a/sdk/search/Azure.Search.Documents/src/Indexes/SearchIndexClient.cs +++ b/sdk/search/Azure.Search.Documents/src/Indexes/SearchIndexClient.cs @@ -227,16 +227,16 @@ public virtual async Task> GetServiceStatistic /// Shows how an analyzer breaks text into tokens. 
/// /// The name of the index used to test an analyzer. - /// The containing the text and analyzer or analyzer components to test. + /// The containing the text and analyzer or analyzer components to test. /// Optional to propagate notifications that the operation should be canceled. /// /// The from the server containing a list of for analyzed text. /// - /// Thrown when or is null. + /// Thrown when or is null. /// Thrown when a failure is returned by the Search service. public virtual Response> AnalyzeText( string indexName, - AnalyzeRequest analyzeRequest, + AnalyzeTextRequest request, CancellationToken cancellationToken = default) { using DiagnosticScope scope = _clientDiagnostics.CreateScope($"{nameof(SearchIndexClient)}.{nameof(AnalyzeText)}"); @@ -245,7 +245,7 @@ public virtual Response> AnalyzeText( { Response result = IndexesClient.Analyze( indexName, - analyzeRequest, + request, cancellationToken); return Response.FromValue(result.Value.Tokens, result.GetRawResponse()); @@ -261,16 +261,16 @@ public virtual Response> AnalyzeText( /// Shows how an analyzer breaks text into tokens. /// /// The name of the index used to test an analyzer. - /// The containing the text and analyzer or analyzer components to test. + /// The containing the text and analyzer or analyzer components to test. /// Optional to propagate notifications that the operation should be canceled. /// /// The from the server containing a list of for analyzed text. /// - /// Thrown when or is null. + /// Thrown when or is null. /// Thrown when a failure is returned by the Search service. 
public virtual async Task>> AnalyzeTextAsync( string indexName, - AnalyzeRequest analyzeRequest, + AnalyzeTextRequest request, CancellationToken cancellationToken = default) { using DiagnosticScope scope = _clientDiagnostics.CreateScope($"{nameof(SearchIndexClient)}.{nameof(AnalyzeText)}"); @@ -279,7 +279,7 @@ public virtual async Task>> AnalyzeTex { Response result = await IndexesClient.AnalyzeAsync( indexName, - analyzeRequest, + request, cancellationToken) .ConfigureAwait(false); diff --git a/sdk/search/Azure.Search.Documents/tests/Models/AnalyzeTextRequestTests.cs b/sdk/search/Azure.Search.Documents/tests/Models/AnalyzeTextRequestTests.cs new file mode 100644 index 0000000000000..f9b908217d6af --- /dev/null +++ b/sdk/search/Azure.Search.Documents/tests/Models/AnalyzeTextRequestTests.cs @@ -0,0 +1,19 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System; +using Azure.Search.Documents.Indexes.Models; +using NUnit.Framework; + +namespace Azure.Search.Documents.Tests.Models +{ + public class AnalyzeTextRequestTests + { + [Test] + public void RequiresText() + { + ArgumentNullException ex = Assert.Throws(() => new AnalyzeTextRequest(null)); + Assert.AreEqual("text", ex.ParamName); + } + } +} diff --git a/sdk/search/Azure.Search.Documents/tests/SearchIndexClientTests.cs b/sdk/search/Azure.Search.Documents/tests/SearchIndexClientTests.cs index f492c83749213..2dfacaa06bf2a 100644 --- a/sdk/search/Azure.Search.Documents/tests/SearchIndexClientTests.cs +++ b/sdk/search/Azure.Search.Documents/tests/SearchIndexClientTests.cs @@ -3,6 +3,7 @@ using System; using System.Collections.Generic; +using System.Linq; using System.Net; using System.Threading.Tasks; using Azure.Core; @@ -314,5 +315,23 @@ await client.CreateOrUpdateSynonymMapAsync( await client.DeleteSynonymMapAsync(updatedMap, onlyIfUnchanged: true); } + + [Test] + public async Task AnalyzeText() + { + await using SearchResources resources = await 
SearchResources.GetSharedHotelsIndexAsync(this); + + SearchIndexClient client = resources.GetIndexClient(); + + AnalyzeTextRequest request = new AnalyzeTextRequest("The quick brown fox jumped over the lazy dog.") + { + Tokenizer = LexicalTokenizerName.Whitespace, + }; + + Response> result = await client.AnalyzeTextAsync(resources.IndexName, request); + IReadOnlyList tokens = result.Value; + + Assert.AreEqual(new[] { "The", "quick", "brown", "fox", "jumped", "over", "the", "lazy", "dog." }, tokens.Select(t => t.Token)); + } } } diff --git a/sdk/search/Azure.Search.Documents/tests/SessionRecords/SearchIndexClientTests/AnalyzeText.json b/sdk/search/Azure.Search.Documents/tests/SessionRecords/SearchIndexClientTests/AnalyzeText.json new file mode 100644 index 0000000000000..883af27c73f5f --- /dev/null +++ b/sdk/search/Azure.Search.Documents/tests/SessionRecords/SearchIndexClientTests/AnalyzeText.json @@ -0,0 +1,106 @@ +{ + "Entries": [ + { + "RequestUri": "https://azs-net-heathsrchtst.search.windows.net/indexes(\u0027omcjubbl\u0027)/search.analyze?api-version=2019-05-06-Preview", + "RequestMethod": "POST", + "RequestHeaders": { + "Accept": "application/json; odata.metadata=minimal", + "api-key": "Sanitized", + "Content-Length": "81", + "Content-Type": "application/json", + "traceparent": "00-0fcda9bf5af6cc4ab507afc2cde9ff76-d9390cf074ea274c-00", + "User-Agent": [ + "azsdk-net-Search.Documents/1.0.0-dev.20200603.1", + "(.NET Core 4.6.28801.04; Microsoft Windows 10.0.19041 )" + ], + "x-ms-client-request-id": "b6bb294210d3599c5ce1600661618eab", + "x-ms-return-client-request-id": "true" + }, + "RequestBody": { + "text": "The quick brown fox jumped over the lazy dog.", + "tokenizer": "whitespace" + }, + "StatusCode": 200, + "ResponseHeaders": { + "Cache-Control": "no-cache", + "client-request-id": "b6bb2942-10d3-599c-5ce1-600661618eab", + "Content-Length": "701", + "Content-Type": "application/json; odata.metadata=minimal", + "Date": "Thu, 04 Jun 2020 02:02:51 GMT", + 
"elapsed-time": "41", + "Expires": "-1", + "OData-Version": "4.0", + "Pragma": "no-cache", + "Preference-Applied": "odata.include-annotations=\u0022*\u0022", + "request-id": "b6bb2942-10d3-599c-5ce1-600661618eab", + "Strict-Transport-Security": "max-age=15724800; includeSubDomains", + "x-ms-client-request-id": "b6bb2942-10d3-599c-5ce1-600661618eab" + }, + "ResponseBody": { + "@odata.context": "https://azs-net-heathsrchtst.search.windows.net/$metadata#Microsoft.Azure.Search.V2019_05_06_Preview.AnalyzeResult", + "tokens": [ + { + "token": "The", + "startOffset": 0, + "endOffset": 3, + "position": 0 + }, + { + "token": "quick", + "startOffset": 4, + "endOffset": 9, + "position": 1 + }, + { + "token": "brown", + "startOffset": 10, + "endOffset": 15, + "position": 2 + }, + { + "token": "fox", + "startOffset": 16, + "endOffset": 19, + "position": 3 + }, + { + "token": "jumped", + "startOffset": 20, + "endOffset": 26, + "position": 4 + }, + { + "token": "over", + "startOffset": 27, + "endOffset": 31, + "position": 5 + }, + { + "token": "the", + "startOffset": 32, + "endOffset": 35, + "position": 6 + }, + { + "token": "lazy", + "startOffset": 36, + "endOffset": 40, + "position": 7 + }, + { + "token": "dog.", + "startOffset": 41, + "endOffset": 45, + "position": 8 + } + ] + } + } + ], + "Variables": { + "RandomSeed": "398631221", + "SearchIndexName": "omcjubbl", + "SEARCH_ADMIN_API_KEY": "Sanitized", + "SEARCH_SERVICE_NAME": "azs-net-heathsrchtst" + } +} \ No newline at end of file diff --git a/sdk/search/Azure.Search.Documents/tests/SessionRecords/SearchIndexClientTests/AnalyzeTextAsync.json b/sdk/search/Azure.Search.Documents/tests/SessionRecords/SearchIndexClientTests/AnalyzeTextAsync.json new file mode 100644 index 0000000000000..68fb02aab69bc --- /dev/null +++ b/sdk/search/Azure.Search.Documents/tests/SessionRecords/SearchIndexClientTests/AnalyzeTextAsync.json @@ -0,0 +1,106 @@ +{ + "Entries": [ + { + "RequestUri": 
"https://azs-net-heathsrchtst.search.windows.net/indexes(\u0027omcjubbl\u0027)/search.analyze?api-version=2019-05-06-Preview", + "RequestMethod": "POST", + "RequestHeaders": { + "Accept": "application/json; odata.metadata=minimal", + "api-key": "Sanitized", + "Content-Length": "81", + "Content-Type": "application/json", + "traceparent": "00-2814f435f4dcd24e9d2ab4ee73d60e28-90ec2e1c86b12949-00", + "User-Agent": [ + "azsdk-net-Search.Documents/1.0.0-dev.20200603.1", + "(.NET Core 4.6.28801.04; Microsoft Windows 10.0.19041 )" + ], + "x-ms-client-request-id": "768227d217ad9009492ef7abdef00368", + "x-ms-return-client-request-id": "true" + }, + "RequestBody": { + "text": "The quick brown fox jumped over the lazy dog.", + "tokenizer": "whitespace" + }, + "StatusCode": 200, + "ResponseHeaders": { + "Cache-Control": "no-cache", + "client-request-id": "768227d2-17ad-9009-492e-f7abdef00368", + "Content-Length": "701", + "Content-Type": "application/json; odata.metadata=minimal", + "Date": "Thu, 04 Jun 2020 02:02:51 GMT", + "elapsed-time": "6", + "Expires": "-1", + "OData-Version": "4.0", + "Pragma": "no-cache", + "Preference-Applied": "odata.include-annotations=\u0022*\u0022", + "request-id": "768227d2-17ad-9009-492e-f7abdef00368", + "Strict-Transport-Security": "max-age=15724800; includeSubDomains", + "x-ms-client-request-id": "768227d2-17ad-9009-492e-f7abdef00368" + }, + "ResponseBody": { + "@odata.context": "https://azs-net-heathsrchtst.search.windows.net/$metadata#Microsoft.Azure.Search.V2019_05_06_Preview.AnalyzeResult", + "tokens": [ + { + "token": "The", + "startOffset": 0, + "endOffset": 3, + "position": 0 + }, + { + "token": "quick", + "startOffset": 4, + "endOffset": 9, + "position": 1 + }, + { + "token": "brown", + "startOffset": 10, + "endOffset": 15, + "position": 2 + }, + { + "token": "fox", + "startOffset": 16, + "endOffset": 19, + "position": 3 + }, + { + "token": "jumped", + "startOffset": 20, + "endOffset": 26, + "position": 4 + }, + { + "token": "over", + 
"startOffset": 27, + "endOffset": 31, + "position": 5 + }, + { + "token": "the", + "startOffset": 32, + "endOffset": 35, + "position": 6 + }, + { + "token": "lazy", + "startOffset": 36, + "endOffset": 40, + "position": 7 + }, + { + "token": "dog.", + "startOffset": 41, + "endOffset": 45, + "position": 8 + } + ] + } + } + ], + "Variables": { + "RandomSeed": "197061334", + "SearchIndexName": "omcjubbl", + "SEARCH_ADMIN_API_KEY": "Sanitized", + "SEARCH_SERVICE_NAME": "azs-net-heathsrchtst" + } +} \ No newline at end of file From 6027caf83506959cfa3b376c480805002b1ea810 Mon Sep 17 00:00:00 2001 From: Heath Stewart Date: Thu, 4 Jun 2020 17:21:45 -0700 Subject: [PATCH 2/5] Rename AnalyzeRequest to AnalyzeTextOptions Fixes #12530 --- .../Azure.Search.Documents/CHANGELOG.md | 2 +- .../Azure.Search.Documents.netstandard2.0.cs | 14 +++-- .../src/Generated/IndexesRestClient.cs | 6 +- ...cs => AnalyzeTextOptions.Serialization.cs} | 2 +- ...zeTextRequest.cs => AnalyzeTextOptions.cs} | 23 +++++--- .../src/Indexes/Models/AnalyzeTextOptions.cs | 59 +++++++++++++++++++ .../src/Indexes/Models/AnalyzeTextRequest.cs | 34 ----------- .../src/Indexes/SearchIndexClient.cs | 8 +-- ...estTests.cs => AnalyzeTextOptionsTests.cs} | 4 +- .../tests/SearchIndexClientTests.cs | 5 +- 10 files changed, 95 insertions(+), 62 deletions(-) rename sdk/search/Azure.Search.Documents/src/Generated/Models/{AnalyzeTextRequest.Serialization.cs => AnalyzeTextOptions.Serialization.cs} (96%) rename sdk/search/Azure.Search.Documents/src/Generated/Models/{AnalyzeTextRequest.cs => AnalyzeTextOptions.cs} (69%) create mode 100644 sdk/search/Azure.Search.Documents/src/Indexes/Models/AnalyzeTextOptions.cs delete mode 100644 sdk/search/Azure.Search.Documents/src/Indexes/Models/AnalyzeTextRequest.cs rename sdk/search/Azure.Search.Documents/tests/Models/{AnalyzeTextRequestTests.cs => AnalyzeTextOptionsTests.cs} (77%) diff --git a/sdk/search/Azure.Search.Documents/CHANGELOG.md 
b/sdk/search/Azure.Search.Documents/CHANGELOG.md index f675d4bbfe753..4d69dd2ce5897 100644 --- a/sdk/search/Azure.Search.Documents/CHANGELOG.md +++ b/sdk/search/Azure.Search.Documents/CHANGELOG.md @@ -6,7 +6,7 @@ - Moved models for managing indexes, indexers, and skillsets to `Azure.Search.Documents.Indexes.Models`. - Split `SearchServiceClient` into `SearchIndexClient` for managing indexes, and `SearchIndexerClient` for managing indexers, both of which are now in `Azure.Search.Documents.Indexes`. -- Renamed `AnalyzeRequest` to `AnalyzeTextRequest`. +- Renamed `AnalyzeRequest` to `AnalyzeTextOptions`, and overloaded constructors to make constructing it easier. - Renamed `SearchIndexerDataSource` to `SearchIndexerDataSourceConnection`. - Renamed methods on `SearchIndexerClient` matching "\*DataSource" to "\*DataSourceConnection". - Made collection- and dictionary-type properties read-only, i.e. has only get-accessors, based on [.NET Guidelines][net-guidelines-collection-properties]. diff --git a/sdk/search/Azure.Search.Documents/api/Azure.Search.Documents.netstandard2.0.cs b/sdk/search/Azure.Search.Documents/api/Azure.Search.Documents.netstandard2.0.cs index 7ec2d5cf81b54..aaa1e59ee9851 100644 --- a/sdk/search/Azure.Search.Documents/api/Azure.Search.Documents.netstandard2.0.cs +++ b/sdk/search/Azure.Search.Documents/api/Azure.Search.Documents.netstandard2.0.cs @@ -109,8 +109,8 @@ public SearchIndexClient(System.Uri endpoint, Azure.AzureKeyCredential credentia public SearchIndexClient(System.Uri endpoint, Azure.AzureKeyCredential credential, Azure.Search.Documents.SearchClientOptions options) { } public virtual System.Uri Endpoint { get { throw null; } } public virtual string ServiceName { get { throw null; } } - public virtual Azure.Response> AnalyzeText(string indexName, Azure.Search.Documents.Indexes.Models.AnalyzeTextRequest request, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } - public virtual 
System.Threading.Tasks.Task>> AnalyzeTextAsync(string indexName, Azure.Search.Documents.Indexes.Models.AnalyzeTextRequest request, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.Response> AnalyzeText(string indexName, Azure.Search.Documents.Indexes.Models.AnalyzeTextOptions request, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual System.Threading.Tasks.Task>> AnalyzeTextAsync(string indexName, Azure.Search.Documents.Indexes.Models.AnalyzeTextOptions request, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } public virtual Azure.Response CreateIndex(Azure.Search.Documents.Indexes.Models.SearchIndex index, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } public virtual System.Threading.Tasks.Task> CreateIndexAsync(Azure.Search.Documents.Indexes.Models.SearchIndex index, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } public virtual Azure.Response CreateOrUpdateIndex(Azure.Search.Documents.Indexes.Models.SearchIndex index, bool allowIndexDowntime = false, bool onlyIfUnchanged = false, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } @@ -212,14 +212,16 @@ internal AnalyzedTokenInfo() { } public int StartOffset { get { throw null; } } public string Token { get { throw null; } } } - public partial class AnalyzeTextRequest + public partial class AnalyzeTextOptions { - public AnalyzeTextRequest(string text) { } - public Azure.Search.Documents.Indexes.Models.LexicalAnalyzerName? 
Analyzer { get { throw null; } set { } } + public AnalyzeTextOptions(string text) { } + public AnalyzeTextOptions(string text, Azure.Search.Documents.Indexes.Models.LexicalAnalyzerName analyzer) { } + public AnalyzeTextOptions(string text, Azure.Search.Documents.Indexes.Models.LexicalTokenizerName tokenizer) { } + public Azure.Search.Documents.Indexes.Models.LexicalAnalyzerName? Analyzer { get { throw null; } } public System.Collections.Generic.IList CharFilters { get { throw null; } } public string Text { get { throw null; } } public System.Collections.Generic.IList TokenFilters { get { throw null; } } - public Azure.Search.Documents.Indexes.Models.LexicalTokenizerName? Tokenizer { get { throw null; } set { } } + public Azure.Search.Documents.Indexes.Models.LexicalTokenizerName? Tokenizer { get { throw null; } } } public partial class AsciiFoldingTokenFilter : Azure.Search.Documents.Indexes.Models.TokenFilter { diff --git a/sdk/search/Azure.Search.Documents/src/Generated/IndexesRestClient.cs b/sdk/search/Azure.Search.Documents/src/Generated/IndexesRestClient.cs index e7cfecd40f0df..6f7c379dc5cb4 100644 --- a/sdk/search/Azure.Search.Documents/src/Generated/IndexesRestClient.cs +++ b/sdk/search/Azure.Search.Documents/src/Generated/IndexesRestClient.cs @@ -584,7 +584,7 @@ public Response GetStatistics(string indexName, Cancellat } } - internal HttpMessage CreateAnalyzeRequest(string indexName, AnalyzeTextRequest request) + internal HttpMessage CreateAnalyzeRequest(string indexName, AnalyzeTextOptions request) { var message = _pipeline.CreateMessage(); var request0 = message.Request; @@ -612,7 +612,7 @@ internal HttpMessage CreateAnalyzeRequest(string indexName, AnalyzeTextRequest r /// The name of the index for which to test an analyzer. /// The text and analyzer or analysis components to test. /// The cancellation token to use. 
- public async Task> AnalyzeAsync(string indexName, AnalyzeTextRequest request, CancellationToken cancellationToken = default) + public async Task> AnalyzeAsync(string indexName, AnalyzeTextOptions request, CancellationToken cancellationToken = default) { if (indexName == null) { @@ -650,7 +650,7 @@ public async Task> AnalyzeAsync(string indexName, Analyz /// The name of the index for which to test an analyzer. /// The text and analyzer or analysis components to test. /// The cancellation token to use. - public Response Analyze(string indexName, AnalyzeTextRequest request, CancellationToken cancellationToken = default) + public Response Analyze(string indexName, AnalyzeTextOptions request, CancellationToken cancellationToken = default) { if (indexName == null) { diff --git a/sdk/search/Azure.Search.Documents/src/Generated/Models/AnalyzeTextRequest.Serialization.cs b/sdk/search/Azure.Search.Documents/src/Generated/Models/AnalyzeTextOptions.Serialization.cs similarity index 96% rename from sdk/search/Azure.Search.Documents/src/Generated/Models/AnalyzeTextRequest.Serialization.cs rename to sdk/search/Azure.Search.Documents/src/Generated/Models/AnalyzeTextOptions.Serialization.cs index ecccc0c38ddad..8fadfb684c861 100644 --- a/sdk/search/Azure.Search.Documents/src/Generated/Models/AnalyzeTextRequest.Serialization.cs +++ b/sdk/search/Azure.Search.Documents/src/Generated/Models/AnalyzeTextOptions.Serialization.cs @@ -11,7 +11,7 @@ namespace Azure.Search.Documents.Indexes.Models { - public partial class AnalyzeTextRequest : IUtf8JsonSerializable + public partial class AnalyzeTextOptions : IUtf8JsonSerializable { void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) { diff --git a/sdk/search/Azure.Search.Documents/src/Generated/Models/AnalyzeTextRequest.cs b/sdk/search/Azure.Search.Documents/src/Generated/Models/AnalyzeTextOptions.cs similarity index 69% rename from sdk/search/Azure.Search.Documents/src/Generated/Models/AnalyzeTextRequest.cs rename to 
sdk/search/Azure.Search.Documents/src/Generated/Models/AnalyzeTextOptions.cs index 966afdad32c14..f8595653de47f 100644 --- a/sdk/search/Azure.Search.Documents/src/Generated/Models/AnalyzeTextRequest.cs +++ b/sdk/search/Azure.Search.Documents/src/Generated/Models/AnalyzeTextOptions.cs @@ -11,16 +11,29 @@ namespace Azure.Search.Documents.Indexes.Models { /// Specifies some text and analysis components used to break that text into tokens. - public partial class AnalyzeTextRequest + public partial class AnalyzeTextOptions { + /// Initializes a new instance of AnalyzeTextOptions. + /// The text to break into tokens. + public AnalyzeTextOptions(string text) + { + if (text == null) + { + throw new ArgumentNullException(nameof(text)); + } + + Text = text; + TokenFilters = new List(); + CharFilters = new List(); + } - /// Initializes a new instance of AnalyzeTextRequest. + /// Initializes a new instance of AnalyzeTextOptions. /// The text to break into tokens. /// The name of the analyzer to use to break the given text. If this parameter is not specified, you must specify a tokenizer instead. The tokenizer and analyzer parameters are mutually exclusive. /// The name of the tokenizer to use to break the given text. If this parameter is not specified, you must specify an analyzer instead. The tokenizer and analyzer parameters are mutually exclusive. /// An optional list of token filters to use when breaking the given text. This parameter can only be set when using the tokenizer parameter. /// An optional list of character filters to use when breaking the given text. This parameter can only be set when using the tokenizer parameter. - internal AnalyzeTextRequest(string text, LexicalAnalyzerName? analyzer, LexicalTokenizerName? tokenizer, IList tokenFilters, IList charFilters) + internal AnalyzeTextOptions(string text, LexicalAnalyzerName? analyzer, LexicalTokenizerName? 
tokenizer, IList tokenFilters, IList charFilters) { Text = text; Analyzer = analyzer; @@ -31,9 +44,5 @@ internal AnalyzeTextRequest(string text, LexicalAnalyzerName? analyzer, LexicalT /// The text to break into tokens. public string Text { get; } - /// The name of the analyzer to use to break the given text. If this parameter is not specified, you must specify a tokenizer instead. The tokenizer and analyzer parameters are mutually exclusive. - public LexicalAnalyzerName? Analyzer { get; set; } - /// The name of the tokenizer to use to break the given text. If this parameter is not specified, you must specify an analyzer instead. The tokenizer and analyzer parameters are mutually exclusive. - public LexicalTokenizerName? Tokenizer { get; set; } } } diff --git a/sdk/search/Azure.Search.Documents/src/Indexes/Models/AnalyzeTextOptions.cs b/sdk/search/Azure.Search.Documents/src/Indexes/Models/AnalyzeTextOptions.cs new file mode 100644 index 0000000000000..75d0e84f60d70 --- /dev/null +++ b/sdk/search/Azure.Search.Documents/src/Indexes/Models/AnalyzeTextOptions.cs @@ -0,0 +1,59 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System; +using System.Collections.Generic; +using Azure.Core; + +namespace Azure.Search.Documents.Indexes.Models +{ + [CodeGenModel("AnalyzeRequest")] + public partial class AnalyzeTextOptions + { + /// + /// Initializes a new instance of AnalyzeRequest. + /// + /// Required text to break into tokens. + /// The name of the analyzer to use to break the given . + /// is null. + public AnalyzeTextOptions(string text, LexicalAnalyzerName analyzer) + { + Text = text ?? throw new ArgumentNullException(nameof(text)); + Analyzer = analyzer; + + TokenFilters = new List(); + CharFilters = new List(); + } + + /// + /// Initializes a new instance of AnalyzeRequest. + /// + /// Required text to break into tokens. + /// The name of the tokenizer to use to break the given . + /// is null. 
+ public AnalyzeTextOptions(string text, LexicalTokenizerName tokenizer) + { + Text = text ?? throw new ArgumentNullException(nameof(text)); + Tokenizer = tokenizer; + + TokenFilters = new List(); + CharFilters = new List(); + } + + /// The name of the analyzer to use to break the given text. If this parameter is not specified, you must specify a tokenizer instead. The tokenizer and analyzer parameters are mutually exclusive. + [CodeGenMember("Analyzer")] + public LexicalAnalyzerName? Analyzer { get; } + + /// The name of the tokenizer to use to break the given text. If this parameter is not specified, you must specify an analyzer instead. The tokenizer and analyzer parameters are mutually exclusive. + [CodeGenMember("Tokenizer")] + public LexicalTokenizerName? Tokenizer { get; } + + /// An optional list of token filters to use when breaking the given text. This parameter can only be set when using the tokenizer parameter. + [CodeGenMember(EmptyAsUndefined = true, Initialize = true)] + public IList TokenFilters { get; } + + /// An optional list of character filters to use when breaking the given text. This parameter can only be set when using the tokenizer parameter. + [CodeGenMember(EmptyAsUndefined = true, Initialize = true)] + public IList CharFilters { get; } + } +} diff --git a/sdk/search/Azure.Search.Documents/src/Indexes/Models/AnalyzeTextRequest.cs b/sdk/search/Azure.Search.Documents/src/Indexes/Models/AnalyzeTextRequest.cs deleted file mode 100644 index 88d338ef33e91..0000000000000 --- a/sdk/search/Azure.Search.Documents/src/Indexes/Models/AnalyzeTextRequest.cs +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -using System; -using System.Collections.Generic; -using Azure.Core; - -namespace Azure.Search.Documents.Indexes.Models -{ - [CodeGenModel("AnalyzeRequest")] - public partial class AnalyzeTextRequest - { - /// - /// Initializes a new instance of AnalyzeRequest. 
- /// One of or is also required. - /// - /// Required text to break into tokens. - public AnalyzeTextRequest(string text) - { - Text = text ?? throw new ArgumentNullException(nameof(text)); - - TokenFilters = new List(); - CharFilters = new List(); - } - - /// An optional list of token filters to use when breaking the given text. This parameter can only be set when using the tokenizer parameter. - [CodeGenMember(EmptyAsUndefined = true, Initialize = true)] - public IList TokenFilters { get; } - - /// An optional list of character filters to use when breaking the given text. This parameter can only be set when using the tokenizer parameter. - [CodeGenMember(EmptyAsUndefined = true, Initialize = true)] - public IList CharFilters { get; } - } -} diff --git a/sdk/search/Azure.Search.Documents/src/Indexes/SearchIndexClient.cs b/sdk/search/Azure.Search.Documents/src/Indexes/SearchIndexClient.cs index d3b27385204cd..88f1d1315d6bc 100644 --- a/sdk/search/Azure.Search.Documents/src/Indexes/SearchIndexClient.cs +++ b/sdk/search/Azure.Search.Documents/src/Indexes/SearchIndexClient.cs @@ -227,7 +227,7 @@ public virtual async Task> GetServiceStatistic /// Shows how an analyzer breaks text into tokens. /// /// The name of the index used to test an analyzer. - /// The containing the text and analyzer or analyzer components to test. + /// The containing the text and analyzer or analyzer components to test. /// Optional to propagate notifications that the operation should be canceled. /// /// The from the server containing a list of for analyzed text. @@ -236,7 +236,7 @@ public virtual async Task> GetServiceStatistic /// Thrown when a failure is returned by the Search service. 
public virtual Response> AnalyzeText( string indexName, - AnalyzeTextRequest request, + AnalyzeTextOptions request, CancellationToken cancellationToken = default) { using DiagnosticScope scope = _clientDiagnostics.CreateScope($"{nameof(SearchIndexClient)}.{nameof(AnalyzeText)}"); @@ -261,7 +261,7 @@ public virtual Response> AnalyzeText( /// Shows how an analyzer breaks text into tokens. /// /// The name of the index used to test an analyzer. - /// The containing the text and analyzer or analyzer components to test. + /// The containing the text and analyzer or analyzer components to test. /// Optional to propagate notifications that the operation should be canceled. /// /// The from the server containing a list of for analyzed text. @@ -270,7 +270,7 @@ public virtual Response> AnalyzeText( /// Thrown when a failure is returned by the Search service. public virtual async Task>> AnalyzeTextAsync( string indexName, - AnalyzeTextRequest request, + AnalyzeTextOptions request, CancellationToken cancellationToken = default) { using DiagnosticScope scope = _clientDiagnostics.CreateScope($"{nameof(SearchIndexClient)}.{nameof(AnalyzeText)}"); diff --git a/sdk/search/Azure.Search.Documents/tests/Models/AnalyzeTextRequestTests.cs b/sdk/search/Azure.Search.Documents/tests/Models/AnalyzeTextOptionsTests.cs similarity index 77% rename from sdk/search/Azure.Search.Documents/tests/Models/AnalyzeTextRequestTests.cs rename to sdk/search/Azure.Search.Documents/tests/Models/AnalyzeTextOptionsTests.cs index f9b908217d6af..5e8c6a27474da 100644 --- a/sdk/search/Azure.Search.Documents/tests/Models/AnalyzeTextRequestTests.cs +++ b/sdk/search/Azure.Search.Documents/tests/Models/AnalyzeTextOptionsTests.cs @@ -7,12 +7,12 @@ namespace Azure.Search.Documents.Tests.Models { - public class AnalyzeTextRequestTests + public class AnalyzeTextOptionsTests { [Test] public void RequiresText() { - ArgumentNullException ex = Assert.Throws(() => new AnalyzeTextRequest(null)); + ArgumentNullException ex = 
Assert.Throws(() => new AnalyzeTextOptions(null, LexicalTokenizerName.Whitespace)); Assert.AreEqual("text", ex.ParamName); } } diff --git a/sdk/search/Azure.Search.Documents/tests/SearchIndexClientTests.cs b/sdk/search/Azure.Search.Documents/tests/SearchIndexClientTests.cs index 2dfacaa06bf2a..f365339da8578 100644 --- a/sdk/search/Azure.Search.Documents/tests/SearchIndexClientTests.cs +++ b/sdk/search/Azure.Search.Documents/tests/SearchIndexClientTests.cs @@ -323,10 +323,7 @@ public async Task AnalyzeText() SearchIndexClient client = resources.GetIndexClient(); - AnalyzeTextRequest request = new AnalyzeTextRequest("The quick brown fox jumped over the lazy dog.") - { - Tokenizer = LexicalTokenizerName.Whitespace, - }; + AnalyzeTextOptions request = new AnalyzeTextOptions("The quick brown fox jumped over the lazy dog.", LexicalTokenizerName.Whitespace); Response> result = await client.AnalyzeTextAsync(resources.IndexName, request); IReadOnlyList tokens = result.Value; From 0a0ed2151eed20813c5aa1b280a2b6fe7b2e26c8 Mon Sep 17 00:00:00 2001 From: Heath Stewart Date: Thu, 4 Jun 2020 19:36:47 -0700 Subject: [PATCH 3/5] Rename some properties, parameters for consistency Fixes #12536 --- .../Azure.Search.Documents/CHANGELOG.md | 5 +++- .../Azure.Search.Documents.netstandard2.0.cs | 12 +++++----- .../AnalyzeTextOptions.Serialization.cs | 8 +++---- .../Generated/Models/AnalyzeTextOptions.cs | 10 ++++---- .../Models/CustomAnalyzer.Serialization.cs | 2 +- .../src/Generated/Models/CustomAnalyzer.cs | 24 +++---------------- .../src/Indexes/Models/AnalyzeTextOptions.cs | 16 ++++++------- .../src/Indexes/Models/CustomAnalyzer.cs | 17 +++++++++++++ 8 files changed, 48 insertions(+), 46 deletions(-) diff --git a/sdk/search/Azure.Search.Documents/CHANGELOG.md b/sdk/search/Azure.Search.Documents/CHANGELOG.md index 4d69dd2ce5897..705e3c250764e 100644 --- a/sdk/search/Azure.Search.Documents/CHANGELOG.md +++ b/sdk/search/Azure.Search.Documents/CHANGELOG.md @@ -6,7 +6,10 @@ - Moved models 
for managing indexes, indexers, and skillsets to `Azure.Search.Documents.Indexes.Models`. - Split `SearchServiceClient` into `SearchIndexClient` for managing indexes, and `SearchIndexerClient` for managing indexers, both of which are now in `Azure.Search.Documents.Indexes`. -- Renamed `AnalyzeRequest` to `AnalyzeTextOptions`, and overloaded constructors to make constructing it +- Renamed `AnalyzeRequest` to `AnalyzeTextOptions`, and overloaded constructors with required parameters. +- Renamed `AnalyzeTextOptions.Analyzer` to `AnalyzeTextOptions.AnalyzerName`. +- Renamed `AnalyzeTextOptions.Tokenizer` to `AnalyzeTextOptions.TokenizerName`. +- Renamed `CustomAnalyzer.Tokenizer` to `CustomAnalyzer.TokenizerName`. - Renamed `SearchIndexerDataSource` to `SearchIndexerDataSourceConnection`. - Renamed methods on `SearchIndexerClient` matching "\*DataSource" to "\*DataSourceConnection". - Made collection- and dictionary-type properties read-only, i.e. has only get-accessors, based on [.NET Guidelines][net-guidelines-collection-properties]. diff --git a/sdk/search/Azure.Search.Documents/api/Azure.Search.Documents.netstandard2.0.cs b/sdk/search/Azure.Search.Documents/api/Azure.Search.Documents.netstandard2.0.cs index aaa1e59ee9851..c0bb76bf9579f 100644 --- a/sdk/search/Azure.Search.Documents/api/Azure.Search.Documents.netstandard2.0.cs +++ b/sdk/search/Azure.Search.Documents/api/Azure.Search.Documents.netstandard2.0.cs @@ -215,13 +215,13 @@ internal AnalyzedTokenInfo() { } public partial class AnalyzeTextOptions { public AnalyzeTextOptions(string text) { } - public AnalyzeTextOptions(string text, Azure.Search.Documents.Indexes.Models.LexicalAnalyzerName analyzer) { } - public AnalyzeTextOptions(string text, Azure.Search.Documents.Indexes.Models.LexicalTokenizerName tokenizer) { } - public Azure.Search.Documents.Indexes.Models.LexicalAnalyzerName? 
Analyzer { get { throw null; } } + public AnalyzeTextOptions(string text, Azure.Search.Documents.Indexes.Models.LexicalAnalyzerName analyzerName) { } + public AnalyzeTextOptions(string text, Azure.Search.Documents.Indexes.Models.LexicalTokenizerName tokenizerName) { } + public Azure.Search.Documents.Indexes.Models.LexicalAnalyzerName? AnalyzerName { get { throw null; } } public System.Collections.Generic.IList CharFilters { get { throw null; } } public string Text { get { throw null; } } public System.Collections.Generic.IList TokenFilters { get { throw null; } } - public Azure.Search.Documents.Indexes.Models.LexicalTokenizerName? Tokenizer { get { throw null; } } + public Azure.Search.Documents.Indexes.Models.LexicalTokenizerName? TokenizerName { get { throw null; } } } public partial class AsciiFoldingTokenFilter : Azure.Search.Documents.Indexes.Models.TokenFilter { @@ -295,10 +295,10 @@ public CorsOptions(System.Collections.Generic.IEnumerable allowedOrigins } public partial class CustomAnalyzer : Azure.Search.Documents.Indexes.Models.LexicalAnalyzer { - public CustomAnalyzer(string name, Azure.Search.Documents.Indexes.Models.LexicalTokenizerName tokenizer) { } + public CustomAnalyzer(string name, Azure.Search.Documents.Indexes.Models.LexicalTokenizerName tokenizerName) { } public System.Collections.Generic.IList CharFilters { get { throw null; } } public System.Collections.Generic.IList TokenFilters { get { throw null; } } - public Azure.Search.Documents.Indexes.Models.LexicalTokenizerName Tokenizer { get { throw null; } set { } } + public Azure.Search.Documents.Indexes.Models.LexicalTokenizerName TokenizerName { get { throw null; } set { } } } public partial class DataChangeDetectionPolicy { diff --git a/sdk/search/Azure.Search.Documents/src/Generated/Models/AnalyzeTextOptions.Serialization.cs b/sdk/search/Azure.Search.Documents/src/Generated/Models/AnalyzeTextOptions.Serialization.cs index 8fadfb684c861..b52a3b874967a 100644 --- 
a/sdk/search/Azure.Search.Documents/src/Generated/Models/AnalyzeTextOptions.Serialization.cs +++ b/sdk/search/Azure.Search.Documents/src/Generated/Models/AnalyzeTextOptions.Serialization.cs @@ -18,15 +18,15 @@ void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) writer.WriteStartObject(); writer.WritePropertyName("text"); writer.WriteStringValue(Text); - if (Analyzer != null) + if (AnalyzerName != null) { writer.WritePropertyName("analyzer"); - writer.WriteStringValue(Analyzer.Value.ToString()); + writer.WriteStringValue(AnalyzerName.Value.ToString()); } - if (Tokenizer != null) + if (TokenizerName != null) { writer.WritePropertyName("tokenizer"); - writer.WriteStringValue(Tokenizer.Value.ToString()); + writer.WriteStringValue(TokenizerName.Value.ToString()); } if (TokenFilters != null && TokenFilters.Any()) { diff --git a/sdk/search/Azure.Search.Documents/src/Generated/Models/AnalyzeTextOptions.cs b/sdk/search/Azure.Search.Documents/src/Generated/Models/AnalyzeTextOptions.cs index f8595653de47f..ee81a8ed6578f 100644 --- a/sdk/search/Azure.Search.Documents/src/Generated/Models/AnalyzeTextOptions.cs +++ b/sdk/search/Azure.Search.Documents/src/Generated/Models/AnalyzeTextOptions.cs @@ -29,15 +29,15 @@ public AnalyzeTextOptions(string text) /// Initializes a new instance of AnalyzeTextOptions. /// The text to break into tokens. - /// The name of the analyzer to use to break the given text. If this parameter is not specified, you must specify a tokenizer instead. The tokenizer and analyzer parameters are mutually exclusive. - /// The name of the tokenizer to use to break the given text. If this parameter is not specified, you must specify an analyzer instead. The tokenizer and analyzer parameters are mutually exclusive. + /// The name of the analyzer to use to break the given text. If this parameter is not specified, you must specify a tokenizer instead. The tokenizer and analyzer parameters are mutually exclusive. 
+ /// The name of the tokenizer to use to break the given text. If this parameter is not specified, you must specify an analyzer instead. The tokenizer and analyzer parameters are mutually exclusive. /// An optional list of token filters to use when breaking the given text. This parameter can only be set when using the tokenizer parameter. /// An optional list of character filters to use when breaking the given text. This parameter can only be set when using the tokenizer parameter. - internal AnalyzeTextOptions(string text, LexicalAnalyzerName? analyzer, LexicalTokenizerName? tokenizer, IList tokenFilters, IList charFilters) + internal AnalyzeTextOptions(string text, LexicalAnalyzerName? analyzerName, LexicalTokenizerName? tokenizerName, IList tokenFilters, IList charFilters) { Text = text; - Analyzer = analyzer; - Tokenizer = tokenizer; + AnalyzerName = analyzerName; + TokenizerName = tokenizerName; TokenFilters = tokenFilters ?? new List(); CharFilters = charFilters ?? new List(); } diff --git a/sdk/search/Azure.Search.Documents/src/Generated/Models/CustomAnalyzer.Serialization.cs b/sdk/search/Azure.Search.Documents/src/Generated/Models/CustomAnalyzer.Serialization.cs index 10848d4d2dd2a..c485886e18757 100644 --- a/sdk/search/Azure.Search.Documents/src/Generated/Models/CustomAnalyzer.Serialization.cs +++ b/sdk/search/Azure.Search.Documents/src/Generated/Models/CustomAnalyzer.Serialization.cs @@ -18,7 +18,7 @@ void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) { writer.WriteStartObject(); writer.WritePropertyName("tokenizer"); - writer.WriteStringValue(Tokenizer.ToString()); + writer.WriteStringValue(TokenizerName.ToString()); if (TokenFilters != null && TokenFilters.Any()) { writer.WritePropertyName("tokenFilters"); diff --git a/sdk/search/Azure.Search.Documents/src/Generated/Models/CustomAnalyzer.cs b/sdk/search/Azure.Search.Documents/src/Generated/Models/CustomAnalyzer.cs index 240bc7a5fd292..d04776fad06a0 100644 --- 
a/sdk/search/Azure.Search.Documents/src/Generated/Models/CustomAnalyzer.cs +++ b/sdk/search/Azure.Search.Documents/src/Generated/Models/CustomAnalyzer.cs @@ -13,37 +13,19 @@ namespace Azure.Search.Documents.Indexes.Models /// Allows you to take control over the process of converting text into indexable/searchable tokens. It's a user-defined configuration consisting of a single predefined tokenizer and one or more filters. The tokenizer is responsible for breaking text into tokens, and the filters for modifying tokens emitted by the tokenizer. public partial class CustomAnalyzer : LexicalAnalyzer { - /// Initializes a new instance of CustomAnalyzer. - /// The name of the analyzer. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - /// The name of the tokenizer to use to divide continuous text into a sequence of tokens, such as breaking a sentence into words. - public CustomAnalyzer(string name, LexicalTokenizerName tokenizer) : base(name) - { - if (name == null) - { - throw new ArgumentNullException(nameof(name)); - } - - Tokenizer = tokenizer; - TokenFilters = new List(); - CharFilters = new List(); - ODataType = "#Microsoft.Azure.Search.CustomAnalyzer"; - } /// Initializes a new instance of CustomAnalyzer. /// Identifies the concrete type of the analyzer. /// The name of the analyzer. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. - /// The name of the tokenizer to use to divide continuous text into a sequence of tokens, such as breaking a sentence into words. + /// The name of the tokenizer to use to divide continuous text into a sequence of tokens, such as breaking a sentence into words. /// A list of token filters used to filter out or modify the tokens generated by a tokenizer. 
For example, you can specify a lowercase filter that converts all characters to lowercase. The filters are run in the order in which they are listed. /// A list of character filters used to prepare input text before it is processed by the tokenizer. For instance, they can replace certain characters or symbols. The filters are run in the order in which they are listed. - internal CustomAnalyzer(string oDataType, string name, LexicalTokenizerName tokenizer, IList tokenFilters, IList charFilters) : base(oDataType, name) + internal CustomAnalyzer(string oDataType, string name, LexicalTokenizerName tokenizerName, IList tokenFilters, IList charFilters) : base(oDataType, name) { - Tokenizer = tokenizer; + TokenizerName = tokenizerName; TokenFilters = tokenFilters ?? new List(); CharFilters = charFilters ?? new List(); ODataType = oDataType ?? "#Microsoft.Azure.Search.CustomAnalyzer"; } - - /// The name of the tokenizer to use to divide continuous text into a sequence of tokens, such as breaking a sentence into words. - public LexicalTokenizerName Tokenizer { get; set; } } } diff --git a/sdk/search/Azure.Search.Documents/src/Indexes/Models/AnalyzeTextOptions.cs b/sdk/search/Azure.Search.Documents/src/Indexes/Models/AnalyzeTextOptions.cs index 75d0e84f60d70..1892b02af8069 100644 --- a/sdk/search/Azure.Search.Documents/src/Indexes/Models/AnalyzeTextOptions.cs +++ b/sdk/search/Azure.Search.Documents/src/Indexes/Models/AnalyzeTextOptions.cs @@ -14,12 +14,12 @@ public partial class AnalyzeTextOptions /// Initializes a new instance of AnalyzeRequest. /// /// Required text to break into tokens. - /// The name of the analyzer to use to break the given . + /// The name of the analyzer to use to break the given . /// is null. - public AnalyzeTextOptions(string text, LexicalAnalyzerName analyzer) + public AnalyzeTextOptions(string text, LexicalAnalyzerName analyzerName) { Text = text ?? 
throw new ArgumentNullException(nameof(text)); - Analyzer = analyzer; + AnalyzerName = analyzerName; TokenFilters = new List(); CharFilters = new List(); @@ -29,12 +29,12 @@ public AnalyzeTextOptions(string text, LexicalAnalyzerName analyzer) /// Initializes a new instance of AnalyzeRequest. /// /// Required text to break into tokens. - /// The name of the tokenizer to use to break the given . + /// The name of the tokenizer to use to break the given . /// is null. - public AnalyzeTextOptions(string text, LexicalTokenizerName tokenizer) + public AnalyzeTextOptions(string text, LexicalTokenizerName tokenizerName) { Text = text ?? throw new ArgumentNullException(nameof(text)); - Tokenizer = tokenizer; + TokenizerName = tokenizerName; TokenFilters = new List(); CharFilters = new List(); @@ -42,11 +42,11 @@ public AnalyzeTextOptions(string text, LexicalTokenizerName tokenizer) /// The name of the analyzer to use to break the given text. If this parameter is not specified, you must specify a tokenizer instead. The tokenizer and analyzer parameters are mutually exclusive. [CodeGenMember("Analyzer")] - public LexicalAnalyzerName? Analyzer { get; } + public LexicalAnalyzerName? AnalyzerName { get; } /// The name of the tokenizer to use to break the given text. If this parameter is not specified, you must specify an analyzer instead. The tokenizer and analyzer parameters are mutually exclusive. [CodeGenMember("Tokenizer")] - public LexicalTokenizerName? Tokenizer { get; } + public LexicalTokenizerName? TokenizerName { get; } /// An optional list of token filters to use when breaking the given text. This parameter can only be set when using the tokenizer parameter. 
[CodeGenMember(EmptyAsUndefined = true, Initialize = true)] diff --git a/sdk/search/Azure.Search.Documents/src/Indexes/Models/CustomAnalyzer.cs b/sdk/search/Azure.Search.Documents/src/Indexes/Models/CustomAnalyzer.cs index bb60990ae83de..3613759ebeb27 100644 --- a/sdk/search/Azure.Search.Documents/src/Indexes/Models/CustomAnalyzer.cs +++ b/sdk/search/Azure.Search.Documents/src/Indexes/Models/CustomAnalyzer.cs @@ -1,6 +1,7 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. +using System; using System.Collections.Generic; using Azure.Core; @@ -8,6 +9,22 @@ namespace Azure.Search.Documents.Indexes.Models { public partial class CustomAnalyzer { + /// Initializes a new instance of CustomAnalyzer. + /// The name of the analyzer. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. + /// The name of the tokenizer to use to divide continuous text into a sequence of tokens, such as breaking a sentence into words. + /// is null. + public CustomAnalyzer(string name, LexicalTokenizerName tokenizerName) : base(name) + { + TokenizerName = tokenizerName; + TokenFilters = new List(); + CharFilters = new List(); + ODataType = "#Microsoft.Azure.Search.CustomAnalyzer"; + } + + /// The name of the tokenizer to use to divide continuous text into a sequence of tokens, such as breaking a sentence into words. + [CodeGenMember("Tokenizer")] + public LexicalTokenizerName TokenizerName { get; set; } + /// A list of token filters used to filter out or modify the tokens generated by a tokenizer. For example, you can specify a lowercase filter that converts all characters to lowercase. The filters are run in the order in which they are listed. 
[CodeGenMember(EmptyAsUndefined = true, Initialize = true)] public IList TokenFilters { get; } From a53317083308375689da7ccd9b25bbe6aaf8a39a Mon Sep 17 00:00:00 2001 From: Heath Stewart Date: Thu, 4 Jun 2020 19:46:28 -0700 Subject: [PATCH 4/5] Removed the SynonymMap.Format property Only "solr" is supported, and this makes .NET consistent with the other languages. --- sdk/search/Azure.Search.Documents/CHANGELOG.md | 5 +++-- .../api/Azure.Search.Documents.netstandard2.0.cs | 1 - .../src/Generated/Models/SynonymMap.Serialization.cs | 11 +++++++++-- .../src/Generated/Models/SynonymMap.cs | 2 -- .../src/Indexes/Models/SynonymMap.cs | 7 +++++-- 5 files changed, 17 insertions(+), 9 deletions(-) diff --git a/sdk/search/Azure.Search.Documents/CHANGELOG.md b/sdk/search/Azure.Search.Documents/CHANGELOG.md index 705e3c250764e..0889564e0319d 100644 --- a/sdk/search/Azure.Search.Documents/CHANGELOG.md +++ b/sdk/search/Azure.Search.Documents/CHANGELOG.md @@ -4,15 +4,16 @@ ### Breaking Changes +- Made collection- and dictionary-type properties read-only, i.e. has only get-accessors, based on [.NET Guidelines][net-guidelines-collection-properties]. - Moved models for managing indexes, indexers, and skillsets to `Azure.Search.Documents.Indexes.Models`. -- Split `SearchServiceClient` into `SearchIndexClient` for managing indexes, and `SearchIndexerClient` for managing indexers, both of which are now in `Azure.Search.Documents.Indexes`. +- Removed the `SynonymMap.Format` property since only the "solr" format is supported currently. - Renamed `AnalyzeRequest` to `AnalyzeTextOptions`, and overloaded constructors with required parameters. - Renamed `AnalyzeTextOptions.Analyzer` to `AnalyzeTextOptions.AnalyzerName`. - Renamed `AnalyzeTextOptions.Tokenizer` to `AnalyzeTextOptions.TokenizerName`. - Renamed `CustomAnalyzer.Tokenizer` to `CustomAnalyzer.TokenizerName`. - Renamed `SearchIndexerDataSource` to `SearchIndexerDataSourceConnection`. 
- Renamed methods on `SearchIndexerClient` matching "\*DataSource" to "\*DataSourceConnection". -- Made collection- and dictionary-type properties read-only, i.e. has only get-accessors, based on [.NET Guidelines][net-guidelines-collection-properties]. +- Split `SearchServiceClient` into `SearchIndexClient` for managing indexes, and `SearchIndexerClient` for managing indexers, both of which are now in `Azure.Search.Documents.Indexes`. ## 1.0.0-preview.3 (2020-05-05) diff --git a/sdk/search/Azure.Search.Documents/api/Azure.Search.Documents.netstandard2.0.cs b/sdk/search/Azure.Search.Documents/api/Azure.Search.Documents.netstandard2.0.cs index c0bb76bf9579f..4d74f94b3fac3 100644 --- a/sdk/search/Azure.Search.Documents/api/Azure.Search.Documents.netstandard2.0.cs +++ b/sdk/search/Azure.Search.Documents/api/Azure.Search.Documents.netstandard2.0.cs @@ -1584,7 +1584,6 @@ public SynonymMap(string name, System.IO.TextReader reader) { } public SynonymMap(string name, string synonyms) { } public Azure.Search.Documents.Indexes.Models.SearchResourceEncryptionKey EncryptionKey { get { throw null; } set { } } public Azure.ETag? 
ETag { get { throw null; } set { } } - public string Format { get { throw null; } set { } } public string Name { get { throw null; } set { } } public string Synonyms { get { throw null; } set { } } } diff --git a/sdk/search/Azure.Search.Documents/src/Generated/Models/SynonymMap.Serialization.cs b/sdk/search/Azure.Search.Documents/src/Generated/Models/SynonymMap.Serialization.cs index 980f61fc67ade..1e92dcff4768e 100644 --- a/sdk/search/Azure.Search.Documents/src/Generated/Models/SynonymMap.Serialization.cs +++ b/sdk/search/Azure.Search.Documents/src/Generated/Models/SynonymMap.Serialization.cs @@ -17,8 +17,11 @@ void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) writer.WriteStartObject(); writer.WritePropertyName("name"); writer.WriteStringValue(Name); - writer.WritePropertyName("format"); - writer.WriteStringValue(Format); + if (Format != null) + { + writer.WritePropertyName("format"); + writer.WriteStringValue(Format); + } writer.WritePropertyName("synonyms"); writer.WriteStringValue(Synonyms); if (EncryptionKey != null) @@ -50,6 +53,10 @@ internal static SynonymMap DeserializeSynonymMap(JsonElement element) } if (property.NameEquals("format")) { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } format = property.Value.GetString(); continue; } diff --git a/sdk/search/Azure.Search.Documents/src/Generated/Models/SynonymMap.cs b/sdk/search/Azure.Search.Documents/src/Generated/Models/SynonymMap.cs index 68f6f1d2c4f3d..6fdbfcdf5c77b 100644 --- a/sdk/search/Azure.Search.Documents/src/Generated/Models/SynonymMap.cs +++ b/sdk/search/Azure.Search.Documents/src/Generated/Models/SynonymMap.cs @@ -30,8 +30,6 @@ internal SynonymMap(string name, string format, string synonyms, SearchResourceE /// The name of the synonym map. public string Name { get; set; } - /// The format of the synonym map. Only the 'solr' format is currently supported. - public string Format { get; set; } /// A series of synonym rules in the specified synonym map format. 
The rules must be separated by newlines. public string Synonyms { get; set; } /// A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your data when you want full assurance that no one, not even Microsoft, can decrypt your data in Azure Cognitive Search. Once you have encrypted your data, it will always remain encrypted. Azure Cognitive Search will ignore attempts to set this property to null. You can change this property as needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. diff --git a/sdk/search/Azure.Search.Documents/src/Indexes/Models/SynonymMap.cs b/sdk/search/Azure.Search.Documents/src/Indexes/Models/SynonymMap.cs index 7ffe6e413a979..8e441a7cf376b 100644 --- a/sdk/search/Azure.Search.Documents/src/Indexes/Models/SynonymMap.cs +++ b/sdk/search/Azure.Search.Documents/src/Indexes/Models/SynonymMap.cs @@ -23,7 +23,7 @@ public partial class SynonymMap /// The name of the synonym map. /// /// The formatted synonyms string to define. - /// Because only the Solr synonym map format is currently supported, these are values delimited by "\n". + /// Because only the "solr" synonym map format is currently supported, these are values delimited by "\n". /// /// or is an empty string. /// or is null. @@ -43,7 +43,7 @@ public SynonymMap(string name, string synonyms) /// The name of the synonym map. /// /// A from which formatted synonyms are read. - /// Because only the Solr synonym map format is currently supported, these are values delimited by "\n". + /// Because only the "solr" synonym map format is currently supported, these are values delimited by "\n". /// /// is an empty string. /// or is null. @@ -65,5 +65,8 @@ public ETag? ETag get => _etag is null ? 
(ETag?)null : new ETag(_etag); set => _etag = value?.ToString(); } + + /// The format of the synonym map. Only the "solr" format is currently supported. + internal string Format { get; set; } } } From 6b1145ce2d736f0f711b166b4aa1868024a7e7b3 Mon Sep 17 00:00:00 2001 From: Heath Stewart Date: Thu, 4 Jun 2020 20:28:39 -0700 Subject: [PATCH 5/5] Resolve PR feedback --- .../api/Azure.Search.Documents.netstandard2.0.cs | 4 ++-- .../src/Indexes/SearchIndexClient.cs | 16 ++++++++-------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/sdk/search/Azure.Search.Documents/api/Azure.Search.Documents.netstandard2.0.cs b/sdk/search/Azure.Search.Documents/api/Azure.Search.Documents.netstandard2.0.cs index 4d74f94b3fac3..bac910eae60c8 100644 --- a/sdk/search/Azure.Search.Documents/api/Azure.Search.Documents.netstandard2.0.cs +++ b/sdk/search/Azure.Search.Documents/api/Azure.Search.Documents.netstandard2.0.cs @@ -109,8 +109,8 @@ public SearchIndexClient(System.Uri endpoint, Azure.AzureKeyCredential credentia public SearchIndexClient(System.Uri endpoint, Azure.AzureKeyCredential credential, Azure.Search.Documents.SearchClientOptions options) { } public virtual System.Uri Endpoint { get { throw null; } } public virtual string ServiceName { get { throw null; } } - public virtual Azure.Response> AnalyzeText(string indexName, Azure.Search.Documents.Indexes.Models.AnalyzeTextOptions request, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } - public virtual System.Threading.Tasks.Task>> AnalyzeTextAsync(string indexName, Azure.Search.Documents.Indexes.Models.AnalyzeTextOptions request, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.Response> AnalyzeText(string indexName, Azure.Search.Documents.Indexes.Models.AnalyzeTextOptions options, System.Threading.CancellationToken cancellationToken = 
default(System.Threading.CancellationToken)) { throw null; } + public virtual System.Threading.Tasks.Task>> AnalyzeTextAsync(string indexName, Azure.Search.Documents.Indexes.Models.AnalyzeTextOptions options, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } public virtual Azure.Response CreateIndex(Azure.Search.Documents.Indexes.Models.SearchIndex index, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } public virtual System.Threading.Tasks.Task> CreateIndexAsync(Azure.Search.Documents.Indexes.Models.SearchIndex index, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } public virtual Azure.Response CreateOrUpdateIndex(Azure.Search.Documents.Indexes.Models.SearchIndex index, bool allowIndexDowntime = false, bool onlyIfUnchanged = false, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } diff --git a/sdk/search/Azure.Search.Documents/src/Indexes/SearchIndexClient.cs b/sdk/search/Azure.Search.Documents/src/Indexes/SearchIndexClient.cs index 88f1d1315d6bc..6ee47e9acb8d8 100644 --- a/sdk/search/Azure.Search.Documents/src/Indexes/SearchIndexClient.cs +++ b/sdk/search/Azure.Search.Documents/src/Indexes/SearchIndexClient.cs @@ -227,16 +227,16 @@ public virtual async Task> GetServiceStatistic /// Shows how an analyzer breaks text into tokens. /// /// The name of the index used to test an analyzer. - /// The containing the text and analyzer or analyzer components to test. + /// The containing the text and analyzer or analyzer components to test. /// Optional to propagate notifications that the operation should be canceled. /// /// The from the server containing a list of for analyzed text. /// - /// Thrown when or is null. + /// Thrown when or is null. /// Thrown when a failure is returned by the Search service. 
public virtual Response> AnalyzeText( string indexName, - AnalyzeTextOptions request, + AnalyzeTextOptions options, CancellationToken cancellationToken = default) { using DiagnosticScope scope = _clientDiagnostics.CreateScope($"{nameof(SearchIndexClient)}.{nameof(AnalyzeText)}"); @@ -245,7 +245,7 @@ public virtual Response> AnalyzeText( { Response result = IndexesClient.Analyze( indexName, - request, + options, cancellationToken); return Response.FromValue(result.Value.Tokens, result.GetRawResponse()); @@ -261,16 +261,16 @@ public virtual Response> AnalyzeText( /// Shows how an analyzer breaks text into tokens. /// /// The name of the index used to test an analyzer. - /// The containing the text and analyzer or analyzer components to test. + /// The containing the text and analyzer or analyzer components to test. /// Optional to propagate notifications that the operation should be canceled. /// /// The from the server containing a list of for analyzed text. /// - /// Thrown when or is null. + /// Thrown when or is null. /// Thrown when a failure is returned by the Search service. public virtual async Task>> AnalyzeTextAsync( string indexName, - AnalyzeTextOptions request, + AnalyzeTextOptions options, CancellationToken cancellationToken = default) { using DiagnosticScope scope = _clientDiagnostics.CreateScope($"{nameof(SearchIndexClient)}.{nameof(AnalyzeText)}"); @@ -279,7 +279,7 @@ public virtual async Task>> AnalyzeTex { Response result = await IndexesClient.AnalyzeAsync( indexName, - request, + options, cancellationToken) .ConfigureAwait(false);